File: | dev/pci/drm/radeon/radeon_object.c |
Warning: | line 638, column 3: Value stored to 'reg' is never read |
1 | /* |
2 | * Copyright 2009 Jerome Glisse. |
3 | * All Rights Reserved. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the |
7 | * "Software"), to deal in the Software without restriction, including |
8 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * the following conditions: |
12 | * |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
20 | * |
21 | * The above copyright notice and this permission notice (including the |
22 | * next paragraph) shall be included in all copies or substantial portions |
23 | * of the Software. |
24 | * |
25 | */ |
26 | /* |
27 | * Authors: |
28 | * Jerome Glisse <glisse@freedesktop.org> |
29 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> |
30 | * Dave Airlie |
31 | */ |
32 | |
33 | #include <linux/io.h> |
34 | #include <linux/list.h> |
35 | #include <linux/slab.h> |
36 | |
37 | #include <drm/drm_cache.h> |
38 | #include <drm/drm_prime.h> |
39 | #include <drm/radeon_drm.h> |
40 | |
41 | #include "radeon.h" |
42 | #include "radeon_trace.h" |
43 | |
44 | int radeon_ttm_init(struct radeon_device *rdev); |
45 | void radeon_ttm_fini(struct radeon_device *rdev); |
46 | static void radeon_bo_clear_surface_reg(struct radeon_bo *bo); |
47 | |
48 | /* |
49 | * To exclude mutual BO access we rely on bo_reserve exclusion, as all |
50 | * functions are calling it. |
51 | */ |
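All of the helpers below therefore assume the caller holds the buffer's reservation. A minimal sketch of that bracket, using the radeon_bo_reserve()/radeon_bo_unreserve() wrappers from radeon_object.h (the error handling here is illustrative, not lifted from a specific caller):

    int r = radeon_bo_reserve(bo, false);   /* interruptible reserve */
    if (r != 0)
        return r;                           /* e.g. -ERESTARTSYS on a signal */
    /* ... inspect or update the BO: placements, tiling, kmap ... */
    radeon_bo_unreserve(bo);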
52 | |
53 | static void radeon_update_memory_usage(struct radeon_bo *bo, |
54 | unsigned mem_type, int sign) |
55 | { |
56 | struct radeon_device *rdev = bo->rdev; |
57 | u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT; |
58 | |
59 | switch (mem_type) { |
60 | case TTM_PL_TT: |
61 | if (sign > 0) |
62 | atomic64_add(size, &rdev->gtt_usage); |
63 | else |
64 | atomic64_sub(size, &rdev->gtt_usage); |
65 | break; |
66 | case TTM_PL_VRAM: |
67 | if (sign > 0) |
68 | atomic64_add(size, &rdev->vram_usage); |
69 | else |
70 | atomic64_sub(size, &rdev->vram_usage); |
71 | break; |
72 | } |
73 | } |
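These two counters are updated here under the reservation lock and sampled lock-free elsewhere, e.g. by radeon_bo_get_threshold_for_moves() further down. A hedged sketch of reading them for a diagnostic dump (the print itself is illustrative):

    u64 vram = atomic64_read(&rdev->vram_usage);
    u64 gtt = atomic64_read(&rdev->gtt_usage);
    DRM_INFO("VRAM in use: %llu MiB, GTT in use: %llu MiB\n",
             vram >> 20, gtt >> 20);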
74 | |
75 | static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) |
76 | { |
77 | struct radeon_bo *bo; |
78 | |
79 | bo = container_of(tbo, struct radeon_bo, tbo); |
80 | |
81 | radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); |
82 | |
83 | mutex_lock(&bo->rdev->gem.mutex); |
84 | list_del_init(&bo->list); |
85 | mutex_unlock(&bo->rdev->gem.mutex); |
86 | radeon_bo_clear_surface_reg(bo); |
87 | WARN_ON_ONCE(!list_empty(&bo->va)); |
88 | if (bo->tbo.base.import_attach) |
89 | drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); |
90 | drm_gem_object_release(&bo->tbo.base); |
91 | pool_put(&bo->rdev->ddev->objpl, bo); |
92 | } |
93 | |
94 | bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo) |
95 | { |
96 | if (bo->destroy == &radeon_ttm_bo_destroy) |
97 | return true; |
98 | return false; |
99 | } |
100 | |
101 | void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) |
102 | { |
103 | u32 c = 0, i; |
104 | |
105 | rbo->placement.placement = rbo->placements; |
106 | rbo->placement.busy_placement = rbo->placements; |
107 | if (domain & RADEON_GEM_DOMAIN_VRAM) { |
108 | /* Try placing BOs which don't need CPU access outside of the |
109 | * CPU accessible part of VRAM |
110 | */ |
111 | if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) && |
112 | rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) { |
113 | rbo->placements[c].fpfn = |
114 | rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
115 | rbo->placements[c].mem_type = TTM_PL_VRAM; |
116 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | |
117 | TTM_PL_FLAG_UNCACHED; |
118 | } |
119 | |
120 | rbo->placements[c].fpfn = 0; |
121 | rbo->placements[c].mem_type = TTM_PL_VRAM; |
122 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | |
123 | TTM_PL_FLAG_UNCACHED; |
124 | } |
125 | |
126 | if (domain & RADEON_GEM_DOMAIN_GTT) { |
127 | if (rbo->flags & RADEON_GEM_GTT_UC) { |
128 | rbo->placements[c].fpfn = 0; |
129 | rbo->placements[c].mem_type = TTM_PL_TT; |
130 | rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED; |
131 | |
132 | } else if ((rbo->flags & RADEON_GEM_GTT_WC) || |
133 | (rbo->rdev->flags & RADEON_IS_AGP)) { |
134 | rbo->placements[c].fpfn = 0; |
135 | rbo->placements[c].mem_type = TTM_PL_TT; |
136 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | |
137 | TTM_PL_FLAG_UNCACHED; |
138 | } else { |
139 | rbo->placements[c].fpfn = 0; |
140 | rbo->placements[c].mem_type = TTM_PL_TT; |
141 | rbo->placements[c++].flags = TTM_PL_FLAG_CACHED; |
142 | } |
143 | } |
144 | |
145 | if (domain & RADEON_GEM_DOMAIN_CPU) { |
146 | if (rbo->flags & RADEON_GEM_GTT_UC) { |
147 | rbo->placements[c].fpfn = 0; |
148 | rbo->placements[c].mem_type = TTM_PL_SYSTEM; |
149 | rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED; |
150 | |
151 | } else if ((rbo->flags & RADEON_GEM_GTT_WC) || |
152 | rbo->rdev->flags & RADEON_IS_AGP) { |
153 | rbo->placements[c].fpfn = 0; |
154 | rbo->placements[c].mem_type = TTM_PL_SYSTEM; |
155 | rbo->placements[c++].flags = TTM_PL_FLAG_WC | |
156 | TTM_PL_FLAG_UNCACHED; |
157 | } else { |
158 | rbo->placements[c].fpfn = 0; |
159 | rbo->placements[c].mem_type = TTM_PL_SYSTEM; |
160 | rbo->placements[c++].flags = TTM_PL_FLAG_CACHED; |
161 | } |
162 | } |
163 | if (!c) { |
164 | rbo->placements[c].fpfn = 0; |
165 | rbo->placements[c].mem_type = TTM_PL_SYSTEM; |
166 | rbo->placements[c++].flags = TTM_PL_MASK_CACHING; |
167 | } |
168 | |
169 | rbo->placement.num_placement = c; |
170 | rbo->placement.num_busy_placement = c; |
171 | |
172 | for (i = 0; i < c; ++i) { |
173 | if ((rbo->flags & RADEON_GEM_CPU_ACCESS) && |
174 | (rbo->placements[i].mem_type == TTM_PL_VRAM) && |
175 | !rbo->placements[i].fpfn) |
176 | rbo->placements[i].lpfn = |
177 | rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
178 | else |
179 | rbo->placements[i].lpfn = 0; |
180 | } |
181 | } |
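For illustration, a sketch of what a caller gets for the common VRAM-with-GTT-fallback request, assuming none of the special rbo->flags are set:

    radeon_ttm_placement_from_domain(rbo,
        RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT);
    /* rbo->placements[0] is now TTM_PL_VRAM (preferred) and
     * rbo->placements[1] is TTM_PL_TT; TTM tries them in order. */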
182 | |
183 | int radeon_bo_create(struct radeon_device *rdev, |
184 | unsigned long size, int byte_align, bool kernel, |
185 | u32 domain, u32 flags, struct sg_table *sg, |
186 | struct dma_resv *resv, |
187 | struct radeon_bo **bo_ptr) |
188 | { |
189 | struct radeon_bo *bo; |
190 | enum ttm_bo_type type; |
191 | unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; |
192 | size_t acc_size; |
193 | int r; |
194 | |
195 | size = roundup2(size, PAGE_SIZE); |
196 | |
197 | if (kernel) { |
198 | type = ttm_bo_type_kernel; |
199 | } else if (sg) { |
200 | type = ttm_bo_type_sg; |
201 | } else { |
202 | type = ttm_bo_type_device; |
203 | } |
204 | *bo_ptr = NULL; |
205 | |
206 | acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, |
207 | sizeof(struct radeon_bo)); |
208 | |
209 | bo = pool_get(&rdev->ddev->objpl, PR_WAITOK | PR_ZERO); |
210 | if (bo == NULL) |
211 | return -ENOMEM; |
212 | drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size); |
213 | bo->rdev = rdev; |
214 | bo->surface_reg = -1; |
215 | INIT_LIST_HEAD(&bo->list); |
216 | INIT_LIST_HEAD(&bo->va); |
217 | bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM | |
218 | RADEON_GEM_DOMAIN_GTT | |
219 | RADEON_GEM_DOMAIN_CPU); |
220 | |
221 | bo->flags = flags; |
222 | /* PCI GART is always snooped */ |
223 | if (!(rdev->flags & RADEON_IS_PCIE)) |
224 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
225 | |
226 | /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx |
227 | * See https://bugs.freedesktop.org/show_bug.cgi?id=91268 |
228 | */ |
229 | if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635) |
230 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
231 | |
232 | #ifdef CONFIG_X86_32 |
233 | /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit |
234 | * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 |
235 | */ |
236 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
237 | #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) |
238 | /* Don't try to enable write-combining when it can't work, or things |
239 | * may be slow |
240 | * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 |
241 | */ |
242 | #ifndef CONFIG_COMPILE_TEST |
243 | #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ |
244 | thanks to write-combining |
245 | #endif |
246 | |
247 | if (bo->flags & RADEON_GEM_GTT_WC) |
248 | DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " |
249 | "better performance thanks to write-combining\n"); |
250 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
251 | #else |
252 | /* For architectures that don't support WC memory, |
253 | * mask out the WC flag from the BO |
254 | */ |
255 | if (!drm_arch_can_wc_memory()) |
256 | bo->flags &= ~RADEON_GEM_GTT_WC; |
257 | #endif |
258 | |
259 | radeon_ttm_placement_from_domain(bo, domain); |
260 | /* Kernel allocations are uninterruptible */ |
261 | down_read(&rdev->pm.mclk_lock); |
262 | r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, |
263 | &bo->placement, page_align, !kernel, acc_size, |
264 | sg, resv, &radeon_ttm_bo_destroy); |
265 | up_read(&rdev->pm.mclk_lock); |
266 | if (unlikely(r != 0)) { |
267 | return r; |
268 | } |
269 | *bo_ptr = bo; |
270 | |
271 | trace_radeon_bo_create(bo); |
272 | |
273 | return 0; |
274 | } |
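A minimal sketch of the create/reserve/pin/kmap sequence kernel-internal buffers typically go through (error unwinding trimmed; the size and VRAM domain are placeholder choices):

    struct radeon_bo *vb = NULL;
    void *cpu_ptr;
    int r;

    r = radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
                         RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &vb);
    if (r)
        return r;
    r = radeon_bo_reserve(vb, false);
    if (r == 0) {
        r = radeon_bo_pin(vb, RADEON_GEM_DOMAIN_VRAM, NULL);
        if (r == 0)
            r = radeon_bo_kmap(vb, &cpu_ptr);   /* CPU pointer to the BO */
        radeon_bo_unreserve(vb);
    }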
275 | |
276 | int radeon_bo_kmap(struct radeon_bo *bo, void **ptr) |
277 | { |
278 | bool is_iomem; |
279 | int r; |
280 | |
281 | if (bo->kptr) { |
282 | if (ptr) { |
283 | *ptr = bo->kptr; |
284 | } |
285 | return 0; |
286 | } |
287 | r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); |
288 | if (r) { |
289 | return r; |
290 | } |
291 | bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); |
292 | if (ptr) { |
293 | *ptr = bo->kptr; |
294 | } |
295 | radeon_bo_check_tiling(bo, 0, 0); |
296 | return 0; |
297 | } |
298 | |
299 | void radeon_bo_kunmap(struct radeon_bo *bo) |
300 | { |
301 | if (bo->kptr == NULL) |
302 | return; |
303 | bo->kptr = NULL; |
304 | radeon_bo_check_tiling(bo, 0, 0); |
305 | ttm_bo_kunmap(&bo->kmap); |
306 | } |
307 | |
308 | struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo) |
309 | { |
310 | if (bo == NULL) |
311 | return NULL; |
312 | |
313 | ttm_bo_get(&bo->tbo); |
314 | return bo; |
315 | } |
316 | |
317 | void radeon_bo_unref(struct radeon_bo **bo) |
318 | { |
319 | struct ttm_buffer_object *tbo; |
320 | |
321 | if ((*bo) == NULL) |
322 | return; |
323 | tbo = &((*bo)->tbo); |
324 | ttm_bo_put(tbo); |
325 | *bo = NULL; |
326 | } |
327 | |
328 | int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, |
329 | u64 *gpu_addr) |
330 | { |
331 | struct ttm_operation_ctx ctx = { false, false }; |
332 | int r, i; |
333 | |
334 | if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm)) |
335 | return -EPERM; |
336 | |
337 | if (bo->pin_count) { |
338 | bo->pin_count++; |
339 | if (gpu_addr) |
340 | *gpu_addr = radeon_bo_gpu_offset(bo); |
341 | |
342 | if (max_offset != 0) { |
343 | u64 domain_start; |
344 | |
345 | if (domain == RADEON_GEM_DOMAIN_VRAM) |
346 | domain_start = bo->rdev->mc.vram_start; |
347 | else |
348 | domain_start = bo->rdev->mc.gtt_start; |
349 | WARN_ON_ONCE(max_offset < |
350 | (radeon_bo_gpu_offset(bo) - domain_start)); |
351 | } |
352 | |
353 | return 0; |
354 | } |
355 | if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) { |
356 | /* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */ |
357 | return -EINVAL; |
358 | } |
359 | |
360 | radeon_ttm_placement_from_domain(bo, domain); |
361 | for (i = 0; i < bo->placement.num_placement; i++) { |
362 | /* force to pin into visible video ram */ |
363 | if ((bo->placements[i].mem_type == TTM_PL_VRAM) && |
364 | !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) && |
365 | (!max_offset || max_offset > bo->rdev->mc.visible_vram_size)) |
366 | bo->placements[i].lpfn = |
367 | bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
368 | else |
369 | bo->placements[i].lpfn = max_offset >> PAGE_SHIFT; |
370 | |
371 | bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; |
372 | } |
373 | |
374 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
375 | if (likely(r == 0)) { |
376 | bo->pin_count = 1; |
377 | if (gpu_addr != NULL) |
378 | *gpu_addr = radeon_bo_gpu_offset(bo); |
379 | if (domain == RADEON_GEM_DOMAIN_VRAM) |
380 | bo->rdev->vram_pin_size += radeon_bo_size(bo); |
381 | else |
382 | bo->rdev->gart_pin_size += radeon_bo_size(bo); |
383 | } else { |
384 | dev_err(bo->rdev->dev, "%p pin failed\n", bo); |
385 | } |
386 | return r; |
387 | } |
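A hedged example of the max_offset restriction (the value is illustrative): pinning a buffer so that it must end up inside the CPU-visible VRAM aperture:

    u64 gpu_addr;
    r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
                                 rdev->mc.visible_vram_size, &gpu_addr);
    /* each placement's lpfn is clamped to max_offset >> PAGE_SHIFT,
     * so validation cannot place the BO above visible_vram_size */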
388 | |
389 | int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) |
390 | { |
391 | return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); |
392 | } |
393 | |
394 | int radeon_bo_unpin(struct radeon_bo *bo) |
395 | { |
396 | struct ttm_operation_ctx ctx = { false, false }; |
397 | int r, i; |
398 | |
399 | if (!bo->pin_count) { |
400 | dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); |
401 | return 0; |
402 | } |
403 | bo->pin_count--; |
404 | if (bo->pin_count) |
405 | return 0; |
406 | for (i = 0; i < bo->placement.num_placement; i++) { |
407 | bo->placements[i].lpfn = 0; |
408 | bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; |
409 | } |
410 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
411 | if (likely(r == 0)) { |
412 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) |
413 | bo->rdev->vram_pin_size -= radeon_bo_size(bo); |
414 | else |
415 | bo->rdev->gart_pin_size -= radeon_bo_size(bo); |
416 | } else { |
417 | dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); |
418 | } |
419 | return r; |
420 | } |
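pin_count is a plain nesting counter maintained under the reservation lock; only the 0 -> 1 and 1 -> 0 transitions revalidate the BO. A sketch of the pairing:

    radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);   /* 0 -> 1: sets NO_EVICT, validates */
    radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);   /* 1 -> 2: just bumps the count */
    radeon_bo_unpin(bo);                              /* 2 -> 1: no placement change */
    radeon_bo_unpin(bo);                              /* 1 -> 0: clears NO_EVICT, revalidates */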
421 | |
422 | int radeon_bo_evict_vram(struct radeon_device *rdev) |
423 | { |
424 | /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ |
425 | #ifndef CONFIG_HIBERNATION |
426 | if (rdev->flags & RADEON_IS_IGP) { |
427 | if (rdev->mc.igp_sideport_enabled == false) |
428 | /* Useless to evict on IGP chips */ |
429 | return 0; |
430 | } |
431 | #endif |
432 | return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); |
433 | } |
434 | |
435 | void radeon_bo_force_delete(struct radeon_device *rdev) |
436 | { |
437 | struct radeon_bo *bo, *n; |
438 | |
439 | if (list_empty(&rdev->gem.objects)) { |
440 | return; |
441 | } |
442 | dev_err(rdev->dev, "Userspace still has active objects !\n"); |
443 | list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { |
444 | dev_err(rdev->dev, "%p %p %lu %lu force free\n", |
445 | &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size, |
446 | *((unsigned long *)&bo->tbo.base.refcount)); |
447 | mutex_lock(&bo->rdev->gem.mutex); |
448 | list_del_init(&bo->list); |
449 | mutex_unlock(&bo->rdev->gem.mutex); |
450 | /* this should unref the ttm bo */ |
451 | drm_gem_object_put(&bo->tbo.base); |
452 | } |
453 | } |
454 | |
455 | int radeon_bo_init(struct radeon_device *rdev) |
456 | { |
457 | paddr_t start, end; |
458 | |
459 | #ifdef __linux__ |
460 | /* reserve PAT memory space to WC for VRAM */ |
461 | arch_io_reserve_memtype_wc(rdev->mc.aper_base, |
462 | rdev->mc.aper_size); |
463 | #endif |
464 | |
465 | /* Add an MTRR for the VRAM */ |
466 | if (!rdev->fastfb_working) { |
467 | #ifdef __linux__ |
468 | rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base, |
469 | rdev->mc.aper_size); |
470 | #else |
471 | drm_mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, DRM_MTRR_WC); |
472 | /* fake a 'cookie', seems to be unused? */ |
473 | rdev->mc.vram_mtrr = 1; |
474 | #endif |
475 | } |
476 | |
477 | start = atop(bus_space_mmap(rdev->memt, rdev->mc.aper_base, 0, 0, 0)); |
478 | end = start + atop(rdev->mc.aper_size); |
479 | uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE); |
480 | |
481 | DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", |
482 | rdev->mc.mc_vram_size >> 20, |
483 | (unsigned long long)rdev->mc.aper_size >> 20); |
484 | DRM_INFO("RAM width %dbits %cDR\n", |
485 | rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); |
486 | return radeon_ttm_init(rdev); |
487 | } |
488 | |
489 | void radeon_bo_fini(struct radeon_device *rdev) |
490 | { |
491 | radeon_ttm_fini(rdev); |
492 | #ifdef __linux__ |
493 | arch_phys_wc_del(rdev->mc.vram_mtrr); |
494 | arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size); |
495 | #else |
496 | drm_mtrr_del(0, rdev->mc.aper_base, rdev->mc.aper_size, DRM_MTRR_WC); |
497 | #endif |
498 | } |
499 | |
500 | /* Returns how many bytes TTM can move per IB. |
501 | */ |
502 | static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev) |
503 | { |
504 | u64 real_vram_size = rdev->mc.real_vram_size; |
505 | u64 vram_usage = atomic64_read(&rdev->vram_usage); |
506 | |
507 | /* This function is based on the current VRAM usage. |
508 | * |
509 | * - If all of VRAM is free, allow relocating the number of bytes that |
510 | * is equal to 1/4 of the size of VRAM for this IB. |
511 | |
512 | * - If more than one half of VRAM is occupied, only allow relocating |
513 | * 1 MB of data for this IB. |
514 | * |
515 | * - From 0 to one half of used VRAM, the threshold decreases |
516 | * linearly. |
517 | * __________________ |
518 | * 1/4 of -|\ | |
519 | * VRAM | \ | |
520 | * | \ | |
521 | * | \ | |
522 | * | \ | |
523 | * | \ | |
524 | * | \ | |
525 | * | \________|1 MB |
526 | * |----------------| |
527 | * VRAM 0 % 100 % |
528 | * used used |
529 | * |
530 | * Note: It's a threshold, not a limit. The threshold must be crossed |
531 | * for buffer relocations to stop, so any buffer of an arbitrary size |
532 | * can be moved as long as the threshold isn't crossed before |
533 | * the relocation takes place. We don't want to disable buffer |
534 | * relocations completely. |
535 | * |
536 | * The idea is that buffers should be placed in VRAM at creation time |
537 | * and TTM should only do a minimum number of relocations during |
538 | * command submission. In practice, you need to submit at least |
539 | * a dozen IBs to move all buffers to VRAM if they are in GTT. |
540 | * |
541 | * Also, things can get pretty crazy under memory pressure and actual |
542 | * VRAM usage can change a lot, so playing safe even at 50% does |
543 | * consistently increase performance. |
544 | */ |
545 | |
546 | u64 half_vram = real_vram_size >> 1; |
547 | u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage; |
548 | u64 bytes_moved_threshold = half_free_vram >> 1; |
549 | return max(bytes_moved_threshold, 1024*1024ull); |
550 | } |
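A worked instance of the formula above, with illustrative numbers: for real_vram_size = 1024 MiB, half_vram = 512 MiB. At vram_usage = 256 MiB, half_free_vram = 512 - 256 = 256 MiB, so the threshold is 256 / 2 = 128 MiB (at 0 MiB used it would be 512 / 2 = 256 MiB, i.e. 1/4 of VRAM, matching the graph). Once vram_usage reaches 512 MiB, half_free_vram is 0 and the max() keeps the threshold at its 1 MiB floor.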
551 | |
552 | int radeon_bo_list_validate(struct radeon_device *rdev, |
553 | struct ww_acquire_ctx *ticket, |
554 | struct list_head *head, int ring) |
555 | { |
556 | struct ttm_operation_ctx ctx = { true, false }; |
557 | struct radeon_bo_list *lobj; |
558 | struct list_head duplicates; |
559 | int r; |
560 | u64 bytes_moved = 0, initial_bytes_moved; |
561 | u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); |
562 | |
563 | INIT_LIST_HEAD(&duplicates); |
564 | r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates); |
565 | if (unlikely(r != 0)) { |
566 | return r; |
567 | } |
568 | |
569 | list_for_each_entry(lobj, head, tv.head) { |
570 | struct radeon_bo *bo = lobj->robj; |
571 | if (!bo->pin_count) { |
572 | u32 domain = lobj->preferred_domains; |
573 | u32 allowed = lobj->allowed_domains; |
574 | u32 current_domain = |
575 | radeon_mem_type_to_domain(bo->tbo.mem.mem_type); |
576 | |
577 | /* Check if this buffer will be moved and don't move it |
578 | * if we have moved too many buffers for this IB already. |
579 | * |
580 | * Note that this allows moving at least one buffer of |
581 | * any size, because it doesn't take the current "bo" |
582 | * into account. We don't want to disallow buffer moves |
583 | * completely. |
584 | */ |
585 | if ((allowed & current_domain) != 0 && |
586 | (domain & current_domain) == 0 && /* will be moved */ |
587 | bytes_moved > bytes_moved_threshold) { |
588 | /* don't move it */ |
589 | domain = current_domain; |
590 | } |
591 | |
592 | retry: |
593 | radeon_ttm_placement_from_domain(bo, domain); |
594 | if (ring == R600_RING_TYPE_UVD_INDEX) |
595 | radeon_uvd_force_into_uvd_segment(bo, allowed); |
596 | |
597 | initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved); |
598 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
599 | bytes_moved += atomic64_read(&rdev->num_bytes_moved) - |
600 | initial_bytes_moved; |
601 | |
602 | if (unlikely(r)) { |
603 | if (r != -ERESTARTSYS && |
604 | domain != lobj->allowed_domains) { |
605 | domain = lobj->allowed_domains; |
606 | goto retry; |
607 | } |
608 | ttm_eu_backoff_reservation(ticket, head); |
609 | return r; |
610 | } |
611 | } |
612 | lobj->gpu_offset = radeon_bo_gpu_offset(bo); |
613 | lobj->tiling_flags = bo->tiling_flags; |
614 | } |
615 | |
616 | list_for_each_entry(lobj, &duplicates, tv.head) { |
617 | lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj); |
618 | lobj->tiling_flags = lobj->robj->tiling_flags; |
619 | } |
620 | |
621 | return 0; |
622 | } |
623 | |
624 | int radeon_bo_get_surface_reg(struct radeon_bo *bo) |
625 | { |
626 | struct radeon_device *rdev = bo->rdev; |
627 | struct radeon_surface_reg *reg; |
628 | struct radeon_bo *old_object; |
629 | int steal; |
630 | int i; |
631 | |
632 | dma_resv_assert_held(bo->tbo.base.resv); |
633 | |
634 | if (!bo->tiling_flags) |
635 | return 0; |
636 | |
637 | if (bo->surface_reg >= 0) { |
638 | reg = &rdev->surface_regs[bo->surface_reg]; |
Value stored to 'reg' is never read | |
639 | i = bo->surface_reg; |
640 | goto out; |
641 | } |
642 | |
643 | steal = -1; |
644 | for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { |
645 | |
646 | reg = &rdev->surface_regs[i]; |
647 | if (!reg->bo) |
648 | break; |
649 | |
650 | old_object = reg->bo; |
651 | if (old_object->pin_count == 0) |
652 | steal = i; |
653 | } |
654 | |
655 | /* if we are all out */ |
656 | if (i == RADEON_GEM_MAX_SURFACES) { |
657 | if (steal == -1) |
658 | return -ENOMEM; |
659 | /* find someone with a surface reg and nuke their BO */ |
660 | reg = &rdev->surface_regs[steal]; |
661 | old_object = reg->bo; |
662 | /* blow away the mapping */ |
663 | DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); |
664 | ttm_bo_unmap_virtual(&old_object->tbo); |
665 | old_object->surface_reg = -1; |
666 | i = steal; |
667 | } |
668 | |
669 | bo->surface_reg = i; |
670 | reg->bo = bo; |
671 | |
672 | out: |
673 | radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, |
674 | bo->tbo.mem.start << PAGE_SHIFT, |
675 | bo->tbo.num_pages << PAGE_SHIFT); |
676 | return 0; |
677 | } |
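On the analyzer finding flagged above: in the early-exit path only the register index matters, because the code after the out: label reads i (plus the BO's tiling fields), and reg is either reassigned or never read again before the function returns. One possible cleanup, shown as a sketch rather than a committed fix, simply drops the dead store:

    if (bo->surface_reg >= 0) {
        i = bo->surface_reg;    /* reuse the surface register this BO already owns */
        goto out;
    }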
678 | |
679 | static void radeon_bo_clear_surface_reg(struct radeon_bo *bo) |
680 | { |
681 | struct radeon_device *rdev = bo->rdev; |
682 | struct radeon_surface_reg *reg; |
683 | |
684 | if (bo->surface_reg == -1) |
685 | return; |
686 | |
687 | reg = &rdev->surface_regs[bo->surface_reg]; |
688 | radeon_clear_surface_reg(rdev, bo->surface_reg); |
689 | |
690 | reg->bo = NULL; |
691 | bo->surface_reg = -1; |
692 | } |
693 | |
694 | int radeon_bo_set_tiling_flags(struct radeon_bo *bo, |
695 | uint32_t tiling_flags, uint32_t pitch) |
696 | { |
697 | struct radeon_device *rdev = bo->rdev; |
698 | int r; |
699 | |
700 | if (rdev->family >= CHIP_CEDAR) { |
701 | unsigned bankw, bankh, mtaspect, tilesplit, stilesplit; |
702 | |
703 | bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK; |
704 | bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK; |
705 | mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK; |
706 | tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK; |
707 | stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK; |
708 | switch (bankw) { |
709 | case 0: |
710 | case 1: |
711 | case 2: |
712 | case 4: |
713 | case 8: |
714 | break; |
715 | default: |
716 | return -EINVAL; |
717 | } |
718 | switch (bankh) { |
719 | case 0: |
720 | case 1: |
721 | case 2: |
722 | case 4: |
723 | case 8: |
724 | break; |
725 | default: |
726 | return -EINVAL; |
727 | } |
728 | switch (mtaspect) { |
729 | case 0: |
730 | case 1: |
731 | case 2: |
732 | case 4: |
733 | case 8: |
734 | break; |
735 | default: |
736 | return -EINVAL; |
737 | } |
738 | if (tilesplit > 6) { |
739 | return -EINVAL; |
740 | } |
741 | if (stilesplit > 6) { |
742 | return -EINVAL; |
743 | } |
744 | } |
745 | r = radeon_bo_reserve(bo, false); |
746 | if (unlikely(r != 0)) |
747 | return r; |
748 | bo->tiling_flags = tiling_flags; |
749 | bo->pitch = pitch; |
750 | radeon_bo_unreserve(bo); |
751 | return 0; |
752 | } |
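The three switch statements above each accept exactly the values {0, 1, 2, 4, 8}. An equivalent compact test, shown only as a sketch (the driver keeps the explicit switches), is "at most 8 and at most one bit set":

    static bool eg_tiling_field_valid(unsigned int v)
    {
        /* true for 0 and for powers of two up to 8 */
        return v <= 8 && (v & (v - 1)) == 0;
    }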
753 | |
754 | void radeon_bo_get_tiling_flags(struct radeon_bo *bo, |
755 | uint32_t *tiling_flags, |
756 | uint32_t *pitch) |
757 | { |
758 | dma_resv_assert_held(bo->tbo.base.resv); |
759 | |
760 | if (tiling_flags) |
761 | *tiling_flags = bo->tiling_flags; |
762 | if (pitch) |
763 | *pitch = bo->pitch; |
764 | } |
765 | |
766 | int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, |
767 | bool force_drop) |
768 | { |
769 | if (!force_drop) |
770 | dma_resv_assert_held(bo->tbo.base.resv); |
771 | |
772 | if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) |
773 | return 0; |
774 | |
775 | if (force_drop) { |
776 | radeon_bo_clear_surface_reg(bo); |
777 | return 0; |
778 | } |
779 | |
780 | if (bo->tbo.mem.mem_type != TTM_PL_VRAM) { |
781 | if (!has_moved) |
782 | return 0; |
783 | |
784 | if (bo->surface_reg >= 0) |
785 | radeon_bo_clear_surface_reg(bo); |
786 | return 0; |
787 | } |
788 | |
789 | if ((bo->surface_reg >= 0) && !has_moved) |
790 | return 0; |
791 | |
792 | return radeon_bo_get_surface_reg(bo); |
793 | } |
794 | |
795 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, |
796 | bool evict, |
797 | struct ttm_resource *new_mem) |
798 | { |
799 | struct radeon_bo *rbo; |
800 | |
801 | if (!radeon_ttm_bo_is_radeon_bo(bo)) |
802 | return; |
803 | |
804 | rbo = container_of(bo, struct radeon_bo, tbo); |
805 | radeon_bo_check_tiling(rbo, 0, 1); |
806 | radeon_vm_bo_invalidate(rbo->rdev, rbo); |
807 | |
808 | /* update statistics */ |
809 | if (!new_mem) |
810 | return; |
811 | |
812 | radeon_update_memory_usage(rbo, bo->mem.mem_type, -1); |
813 | radeon_update_memory_usage(rbo, new_mem->mem_type, 1); |
814 | } |
815 | |
816 | int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) |
817 | { |
818 | struct ttm_operation_ctx ctx = { false, false }; |
819 | struct radeon_device *rdev; |
820 | struct radeon_bo *rbo; |
821 | unsigned long offset, size, lpfn; |
822 | int i, r; |
823 | |
824 | if (!radeon_ttm_bo_is_radeon_bo(bo)) |
825 | return 0; |
826 | rbo = container_of(bo, struct radeon_bo, tbo); |
827 | radeon_bo_check_tiling(rbo, 0, 0); |
828 | rdev = rbo->rdev; |
829 | if (bo->mem.mem_type != TTM_PL_VRAM) |
830 | return 0; |
831 | |
832 | size = bo->mem.num_pages << PAGE_SHIFT; |
833 | offset = bo->mem.start << PAGE_SHIFT; |
834 | if ((offset + size) <= rdev->mc.visible_vram_size) |
835 | return 0; |
836 | |
837 | /* Can't move a pinned BO to visible VRAM */ |
838 | if (rbo->pin_count > 0) |
839 | return -EINVAL; |
840 | |
841 | /* hurrah the memory is not visible ! */ |
842 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); |
843 | lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; |
844 | for (i = 0; i < rbo->placement.num_placement; i++) { |
845 | /* Force into visible VRAM */ |
846 | if ((rbo->placements[i].mem_type == TTM_PL_VRAM) && |
847 | (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn)) |
848 | rbo->placements[i].lpfn = lpfn; |
849 | } |
850 | r = ttm_bo_validate(bo, &rbo->placement, &ctx); |
851 | if (unlikely(r == -ENOMEM)) { |
852 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); |
853 | return ttm_bo_validate(bo, &rbo->placement, &ctx); |
854 | } else if (unlikely(r != 0)) { |
855 | return r; |
856 | } |
857 | |
858 | offset = bo->mem.start << PAGE_SHIFT; |
859 | /* this should never happen */ |
860 | if ((offset + size) > rdev->mc.visible_vram_size) |
861 | return -EINVAL; |
862 | |
863 | return 0; |
864 | } |
865 | |
866 | int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) |
867 | { |
868 | int r; |
869 | |
870 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); |
871 | if (unlikely(r != 0)) |
872 | return r; |
873 | if (mem_type) |
874 | *mem_type = bo->tbo.mem.mem_type; |
875 | |
876 | r = ttm_bo_wait(&bo->tbo, true, no_wait); |
877 | ttm_bo_unreserve(&bo->tbo); |
878 | return r; |
879 | } |
880 | |
881 | /** |
882 | * radeon_bo_fence - add fence to buffer object |
883 | * |
884 | * @bo: buffer object in question |
885 | * @fence: fence to add |
886 | * @shared: true if fence should be added shared |
887 | * |
888 | */ |
889 | void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, |
890 | bool shared) |
891 | { |
892 | struct dma_resv *resv = bo->tbo.base.resv; |
893 | |
894 | if (shared) |
895 | dma_resv_add_shared_fence(resv, &fence->base); |
896 | else |
897 | dma_resv_add_excl_fence(resv, &fence->base); |
898 | } |
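A hedged sketch of how callers use this helper after submitting GPU work that touches the BO (the fence itself comes from a ring submission elsewhere in the driver):

    radeon_bo_fence(bo, fence, false);  /* GPU writes: add as the exclusive fence */
    radeon_bo_fence(bo, fence, true);   /* GPU reads only: add as a shared fence */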