File: dev/pci/drm/radeon/radeon_cs.c
Warning: line 169, column 4: Value stored to 'need_mmap_lock' is never read
1 | /*
2 |  * Copyright 2008 Jerome Glisse.
3 |  * All Rights Reserved.
4 |  *
5 |  * Permission is hereby granted, free of charge, to any person obtaining a
6 |  * copy of this software and associated documentation files (the "Software"),
7 |  * to deal in the Software without restriction, including without limitation
8 |  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 |  * and/or sell copies of the Software, and to permit persons to whom the
10 |  * Software is furnished to do so, subject to the following conditions:
11 |  *
12 |  * The above copyright notice and this permission notice (including the next
13 |  * paragraph) shall be included in all copies or substantial portions of the
14 |  * Software.
15 |  *
16 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 |  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 |  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 |  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 |  * DEALINGS IN THE SOFTWARE.
23 |  *
24 |  * Authors:
25 |  *    Jerome Glisse <glisse@freedesktop.org>
26 |  */
27 |
28 | #include <linux/list_sort.h>
29 | #include <linux/pci.h>
30 | #include <linux/uaccess.h>
31 |
32 | #include <drm/drm_device.h>
33 | #include <drm/drm_file.h>
34 | #include <drm/radeon_drm.h>
35 |
36 | #include "radeon.h"
37 | #include "radeon_reg.h"
38 | #include "radeon_trace.h"
39 |
40 | #define RADEON_CS_MAX_PRIORITY	32u
41 | #define RADEON_CS_NUM_BUCKETS	(RADEON_CS_MAX_PRIORITY + 1)
42 |
43 | /* This is based on the bucket sort with O(n) time complexity.
44 |  * An item with priority "i" is added to bucket[i]. The lists are then
45 |  * concatenated in descending order.
46 |  */
47 | struct radeon_cs_buckets {
48 | 	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
49 | };
50 |
51 | static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
52 | {
53 | 	unsigned i;
54 |
55 | 	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
56 | 		INIT_LIST_HEAD(&b->bucket[i]);
57 | }
58 |
59 | static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
60 | 				  struct list_head *item, unsigned priority)
61 | {
62 | 	/* Since buffers which appear sooner in the relocation list are
63 | 	 * likely to be used more often than buffers which appear later
64 | 	 * in the list, the sort mustn't change the ordering of buffers
65 | 	 * with the same priority, i.e. it must be stable.
66 | 	 */
67 | 	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
68 | }
69 |
70 | static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
71 | 				       struct list_head *out_list)
72 | {
73 | 	unsigned i;
74 |
75 | 	/* Connect the sorted buckets in the output list. */
76 | 	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
77 | 		list_splice(&b->bucket[i], out_list);
78 | 	}
79 | }
80 |
81 | static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
82 | {
83 | 	struct radeon_cs_chunk *chunk;
84 | 	struct radeon_cs_buckets buckets;
85 | 	unsigned i;
86 | 	bool need_mmap_lock = false;
87 | 	int r;
88 |
89 | 	if (p->chunk_relocs == NULL) {
90 | 		return 0;
91 | 	}
92 | 	chunk = p->chunk_relocs;
93 | 	p->dma_reloc_idx = 0;
94 | 	/* FIXME: we assume that each reloc uses 4 dwords */
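	/* note: the assumed 4-dword layout matches struct drm_radeon_cs_reloc
	 * (handle, read_domains, write_domain, flags). */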
95 | 	p->nrelocs = chunk->length_dw / 4;
96 | 	p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
97 | 				   GFP_KERNEL | __GFP_ZERO);
98 | 	if (p->relocs == NULL) {
99 | 		return -ENOMEM;
100 | 	}
101 |
102 | 	radeon_cs_buckets_init(&buckets);
103 |
104 | 	for (i = 0; i < p->nrelocs; i++) {
105 | 		struct drm_radeon_cs_reloc *r;
106 | 		struct drm_gem_object *gobj;
107 | 		unsigned priority;
108 |
109 | 		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
110 | 		gobj = drm_gem_object_lookup(p->filp, r->handle);
111 | 		if (gobj == NULL) {
112 | 			DRM_ERROR("gem object lookup failed 0x%x\n",
113 | 				  r->handle);
114 | 			return -ENOENT;
115 | 		}
116 | 		p->relocs[i].robj = gem_to_radeon_bo(gobj);
117 |
118 | 		/* The userspace buffer priorities are from 0 to 15. A higher
119 | 		 * number means the buffer is more important.
120 | 		 * Also, the buffers used for write have a higher priority than
121 | 		 * the buffers used for read only, which doubles the range
122 | 		 * to 0 to 31. 32 is reserved for the kernel driver.
123 | 		 */
124 | 		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
125 | 			   + !!r->write_domain;
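		/* note: e.g. a write reloc with user priority 15 yields
		 * 15 * 2 + 1 = 31; RADEON_CS_MAX_PRIORITY (32) stays reserved
		 * for kernel use (the UVD case below). */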
126 |
127 | 		/* The first reloc of an UVD job is the msg and that must be in
128 | 		 * VRAM, the second reloc is the DPB and for WMV that must be in
129 | 		 * VRAM as well. Also put everything into VRAM on AGP cards and older
130 | 		 * IGP chips to avoid image corruption.
131 | 		 */
132 | 		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
133 | 		    (i <= 0 || (p->rdev->flags & RADEON_IS_AGP) ||
134 | 		     p->rdev->family == CHIP_RS780 ||
135 | 		     p->rdev->family == CHIP_RS880)) {
136 |
137 | 			/* TODO: is this still needed for NI+ ? */
138 | 			p->relocs[i].preferred_domains =
139 | 				RADEON_GEM_DOMAIN_VRAM;
140 |
141 | 			p->relocs[i].allowed_domains =
142 | 				RADEON_GEM_DOMAIN_VRAM;
143 |
144 | 			/* prioritize this over any other relocation */
145 | 			priority = RADEON_CS_MAX_PRIORITY;
146 | 		} else {
147 | 			uint32_t domain = r->write_domain ?
148 | 				r->write_domain : r->read_domains;
149 |
150 | 			if (domain & RADEON_GEM_DOMAIN_CPU) {
151 | 				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
152 | 					  "for command submission\n");
153 | 				return -EINVAL;
154 | 			}
155 |
156 | 			p->relocs[i].preferred_domains = domain;
157 | 			if (domain == RADEON_GEM_DOMAIN_VRAM)
158 | 				domain |= RADEON_GEM_DOMAIN_GTT;
159 | 			p->relocs[i].allowed_domains = domain;
160 | 		}
161 |
162 | 		if (radeon_ttm_tt_has_userptr(p->rdev, p->relocs[i].robj->tbo.ttm)) {
163 | 			uint32_t domain = p->relocs[i].preferred_domains;
164 | 			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
165 | 				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
166 | 					  "allowed for userptr BOs\n");
167 | 				return -EINVAL;
168 | 			}
169 | 			need_mmap_lock = true;
Value stored to 'need_mmap_lock' is never read
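			/* note: the only reads of need_mmap_lock below sit
			 * inside "#ifdef notyet" blocks in this port, so the
			 * store is dead in the current build, hence the
			 * analyzer warning. */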
170 | 			domain = RADEON_GEM_DOMAIN_GTT;
171 | 			p->relocs[i].preferred_domains = domain;
172 | 			p->relocs[i].allowed_domains = domain;
173 | 		}
174 |
175 | 		/* Objects shared as dma-bufs cannot be moved to VRAM */
176 | 		if (p->relocs[i].robj->prime_shared_count) {
177 | 			p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
178 | 			if (!p->relocs[i].allowed_domains) {
179 | 				DRM_ERROR("BO associated with dma-buf cannot "
180 | 					  "be moved to VRAM\n");
181 | 				return -EINVAL;
182 | 			}
183 | 		}
184 |
185 | 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
186 | 		p->relocs[i].tv.num_shared = !r->write_domain;
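		/* note: read-only relocs reserve one shared dma-resv fence
		 * slot; for writers (num_shared == 0) the fence is presumably
		 * attached as exclusive at fence time. */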
187 |
188 | 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
189 | 				      priority);
190 | 	}
191 |
192 | 	radeon_cs_buckets_get_list(&buckets, &p->validated);
193 |
194 | 	if (p->cs_flags & RADEON_CS_USE_VM)
195 | 		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
196 | 					      &p->validated);
197 | #ifdef notyet
198 | 	if (need_mmap_lock)
199 | 		mmap_read_lock(current->mm);
200 | #endif
201 |
202 | 	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
203 |
204 | #ifdef notyet
205 | 	if (need_mmap_lock)
206 | 		mmap_read_unlock(current->mm);
207 | #endif
208 |
209 | 	return r;
210 | }
211 |
212 | static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
213 | {
214 | 	p->priority = priority;
215 |
216 | 	switch (ring) {
217 | 	default:
218 | 		DRM_ERROR("unknown ring id: %d\n", ring);
219 | 		return -EINVAL;
220 | 	case RADEON_CS_RING_GFX:
221 | 		p->ring = RADEON_RING_TYPE_GFX_INDEX;
222 | 		break;
223 | 	case RADEON_CS_RING_COMPUTE:
224 | 		if (p->rdev->family >= CHIP_TAHITI) {
225 | 			if (p->priority > 0)
226 | 				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
227 | 			else
228 | 				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
229 | 		} else
230 | 			p->ring = RADEON_RING_TYPE_GFX_INDEX;
231 | 		break;
232 | 	case RADEON_CS_RING_DMA:
233 | 		if (p->rdev->family >= CHIP_CAYMAN) {
234 | 			if (p->priority > 0)
235 | 				p->ring = R600_RING_TYPE_DMA_INDEX;
236 | 			else
237 | 				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
238 | 		} else if (p->rdev->family >= CHIP_RV770) {
239 | 			p->ring = R600_RING_TYPE_DMA_INDEX;
240 | 		} else {
241 | 			return -EINVAL;
242 | 		}
243 | 		break;
244 | 	case RADEON_CS_RING_UVD:
245 | 		p->ring = R600_RING_TYPE_UVD_INDEX;
246 | 		break;
247 | 	case RADEON_CS_RING_VCE:
248 | 		/* TODO: only use the low priority ring for now */
249 | 		p->ring = TN_RING_TYPE_VCE1_INDEX;
250 | 		break;
251 | 	}
252 | 	return 0;
253 | }
254 |
255 | static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
256 | {
257 | 	struct radeon_bo_list *reloc;
258 | 	int r;
259 |
260 | 	list_for_each_entry(reloc, &p->validated, tv.head) {
261 | 		struct dma_resv *resv;
262 |
263 | 		resv = reloc->robj->tbo.base.resv;
264 | 		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
265 | 				     reloc->tv.num_shared);
266 | 		if (r)
267 | 			return r;
268 | 	}
269 | 	return 0;
270 | }
271 |
272 | /* XXX: note that this is called from the legacy UMS CS ioctl as well */
273 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
274 | {
275 | 	struct drm_radeon_cs *cs = data;
276 | 	uint64_t *chunk_array_ptr;
277 | 	unsigned size, i;
278 | 	u32 ring = RADEON_CS_RING_GFX;
279 | 	s32 priority = 0;
280 |
281 | 	INIT_LIST_HEAD(&p->validated);
282 |
283 | 	if (!cs->num_chunks) {
284 | 		return 0;
285 | 	}
286 |
287 | 	/* get chunks */
288 | 	p->idx = 0;
289 | 	p->ib.sa_bo = NULL;
290 | 	p->const_ib.sa_bo = NULL;
291 | 	p->chunk_ib = NULL;
292 | 	p->chunk_relocs = NULL;
293 | 	p->chunk_flags = NULL;
294 | 	p->chunk_const_ib = NULL;
295 | 	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
296 | 	if (p->chunks_array == NULL) {
297 | 		return -ENOMEM;
298 | 	}
299 | 	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
300 | 	if (copy_from_user(p->chunks_array, chunk_array_ptr,
301 | 			   sizeof(uint64_t)*cs->num_chunks)) {
302 | 		return -EFAULT;
303 | 	}
304 | 	p->cs_flags = 0;
305 | 	p->nchunks = cs->num_chunks;
306 | 	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
307 | 	if (p->chunks == NULL) {
308 | 		return -ENOMEM;
309 | 	}
310 | 	for (i = 0; i < p->nchunks; i++) {
311 | 		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
312 | 		struct drm_radeon_cs_chunk user_chunk;
313 | 		uint32_t __user *cdata;
314 |
315 | 		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
316 | 		if (copy_from_user(&user_chunk, chunk_ptr,
317 | 				   sizeof(struct drm_radeon_cs_chunk))) {
318 | 			return -EFAULT;
319 | 		}
320 | 		p->chunks[i].length_dw = user_chunk.length_dw;
321 | 		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
322 | 			p->chunk_relocs = &p->chunks[i];
323 | 		}
324 | 		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
325 | 			p->chunk_ib = &p->chunks[i];
326 | 			/* zero length IB isn't useful */
327 | 			if (p->chunks[i].length_dw == 0)
328 | 				return -EINVAL;
329 | 		}
330 | 		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
331 | 			p->chunk_const_ib = &p->chunks[i];
332 | 			/* zero length CONST IB isn't useful */
333 | 			if (p->chunks[i].length_dw == 0)
334 | 				return -EINVAL;
335 | 		}
336 | 		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
337 | 			p->chunk_flags = &p->chunks[i];
338 | 			/* zero length flags aren't useful */
339 | 			if (p->chunks[i].length_dw == 0)
340 | 				return -EINVAL;
341 | 		}
342 |
343 | 		size = p->chunks[i].length_dw;
344 | 		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
345 | 		p->chunks[i].user_ptr = cdata;
346 | 		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
347 | 			continue;
348 |
349 | 		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
350 | 			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
351 | 				continue;
352 | 		}
353 |
354 | 		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
355 | 		size *= sizeof(uint32_t);
356 | 		if (p->chunks[i].kdata == NULL) {
357 | 			return -ENOMEM;
358 | 		}
359 | 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
360 | 			return -EFAULT;
361 | 		}
362 | 		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
363 | 			p->cs_flags = p->chunks[i].kdata[0];
364 | 			if (p->chunks[i].length_dw > 1)
365 | 				ring = p->chunks[i].kdata[1];
366 | 			if (p->chunks[i].length_dw > 2)
367 | 				priority = (s32)p->chunks[i].kdata[2];
368 | 		}
369 | 	}
370 |
371 | 	/* these are KMS only */
372 | 	if (p->rdev) {
373 | 		if ((p->cs_flags & RADEON_CS_USE_VM) &&
374 | 		    !p->rdev->vm_manager.enabled) {
375 | 			DRM_ERROR("VM not active on asic!\n");
376 | 			return -EINVAL;
377 | 		}
378 |
379 | 		if (radeon_cs_get_ring(p, ring, priority))
380 | 			return -EINVAL;
381 |
382 | 		/* we only support VM on some SI+ rings */
383 | 		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
384 | 			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
385 | 				DRM_ERROR("Ring %d requires VM!\n", p->ring);
386 | 				return -EINVAL;
387 | 			}
388 | 		} else {
389 | 			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
390 | 				DRM_ERROR("VM not supported on ring %d!\n",
391 | 					  p->ring);
392 | 				return -EINVAL;
393 | 			}
394 | 		}
395 | 	}
396 |
397 | 	return 0;
398 | }
399 |
400 | static int cmp_size_smaller_first(void *priv, const struct list_head *a,
401 | 				  const struct list_head *b)
402 | {
403 | 	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
404 | 	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
405 |
406 | 	/* Sort A before B if A is smaller. */
407 | 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
408 | }
409 |
410 | /**
411 |  * cs_parser_fini() - clean parser states
412 |  * @parser: parser structure holding parsing context.
413 |  * @error: error number
414 |  *
415 |  * If error is set, unvalidate the buffers (backing off the reservation
416 |  * when @backoff is true); otherwise just free the memory used by the parsing context.
417 |  **/
418 | static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
419 | {
420 | 	unsigned i;
421 |
422 | 	if (!error) {
423 | 		/* Sort the buffer list from the smallest to largest buffer,
424 | 		 * which affects the order of buffers in the LRU list.
425 | 		 * This assures that the smallest buffers are added first
426 | 		 * to the LRU list, so they are likely to be later evicted
427 | 		 * first, instead of large buffers whose eviction is more
428 | 		 * expensive.
429 | 		 *
430 | 		 * This slightly lowers the number of bytes moved by TTM
431 | 		 * per frame under memory pressure.
432 | 		 */
433 | 		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
434 |
435 | 		ttm_eu_fence_buffer_objects(&parser->ticket,
436 | 					    &parser->validated,
437 | 					    &parser->ib.fence->base);
438 | 	} else if (backoff) {
439 | 		ttm_eu_backoff_reservation(&parser->ticket,
440 | 					   &parser->validated);
441 | 	}
442 |
443 | 	if (parser->relocs != NULL) {
444 | 		for (i = 0; i < parser->nrelocs; i++) {
445 | 			struct radeon_bo *bo = parser->relocs[i].robj;
446 | 			if (bo == NULL)
447 | 				continue;
448 |
449 | 			drm_gem_object_put(&bo->tbo.base);
450 | 		}
451 | 	}
452 | 	kfree(parser->track);
453 | 	kvfree(parser->relocs);
454 | 	kvfree(parser->vm_bos);
455 | 	for (i = 0; i < parser->nchunks; i++)
456 | 		kvfree(parser->chunks[i].kdata);
457 | 	kfree(parser->chunks);
458 | 	kfree(parser->chunks_array);
459 | 	radeon_ib_free(parser->rdev, &parser->ib);
460 | 	radeon_ib_free(parser->rdev, &parser->const_ib);
461 | }
462 |
463 | static int radeon_cs_ib_chunk(struct radeon_device *rdev,
464 | 			      struct radeon_cs_parser *parser)
465 | {
466 | 	int r;
467 |
468 | 	if (parser->chunk_ib == NULL)
469 | 		return 0;
470 |
471 | 	if (parser->cs_flags & RADEON_CS_USE_VM)
472 | 		return 0;
473 |
474 | 	r = radeon_cs_parse(rdev, parser->ring, parser);
475 | 	if (r || parser->parser_error) {
476 | 		DRM_ERROR("Invalid command stream !\n");
477 | 		return r;
478 | 	}
479 |
480 | 	r = radeon_cs_sync_rings(parser);
481 | 	if (r) {
482 | 		if (r != -ERESTARTSYS)
483 | 			DRM_ERROR("Failed to sync rings: %i\n", r);
484 | 		return r;
485 | 	}
486 |
487 | 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
488 | 		radeon_uvd_note_usage(rdev);
489 | 	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
490 | 		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
491 | 		radeon_vce_note_usage(rdev);
492 |
493 | 	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
494 | 	if (r) {
495 | 		DRM_ERROR("Failed to schedule IB !\n");
496 | 	}
497 | 	return r;
498 | }
499 |
500 | static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
501 | 				   struct radeon_vm *vm)
502 | {
503 | 	struct radeon_device *rdev = p->rdev;
504 | 	struct radeon_bo_va *bo_va;
505 | 	int i, r;
506 |
507 | 	r = radeon_vm_update_page_directory(rdev, vm);
508 | 	if (r)
509 | 		return r;
510 |
511 | 	r = radeon_vm_clear_freed(rdev, vm);
512 | 	if (r)
513 | 		return r;
514 |
515 | 	if (vm->ib_bo_va == NULL) {
516 | 		DRM_ERROR("Tmp BO not in VM!\n");
517 | 		return -EINVAL;
518 | 	}
519 |
520 | 	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
521 | 				&rdev->ring_tmp_bo.bo->tbo.mem);
522 | 	if (r)
523 | 		return r;
524 |
525 | 	for (i = 0; i < p->nrelocs; i++) {
526 | 		struct radeon_bo *bo;
527 |
528 | 		bo = p->relocs[i].robj;
529 | 		bo_va = radeon_vm_bo_find(vm, bo);
530 | 		if (bo_va == NULL) {
531 | 			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
532 | 			return -EINVAL;
533 | 		}
534 |
535 | 		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
536 | 		if (r)
537 | 			return r;
538 |
539 | 		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
540 | 	}
541 |
542 | 	return radeon_vm_clear_invalids(rdev, vm);
543 | }
544 |
545 | static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
546 | 				 struct radeon_cs_parser *parser)
547 | {
548 | 	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
549 | 	struct radeon_vm *vm = &fpriv->vm;
550 | 	int r;
551 |
552 | 	if (parser->chunk_ib == NULL)
553 | 		return 0;
554 | 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
555 | 		return 0;
556 |
557 | 	if (parser->const_ib.length_dw) {
558 | 		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
559 | 		if (r) {
560 | 			return r;
561 | 		}
562 | 	}
563 |
564 | 	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
565 | 	if (r) {
566 | 		return r;
567 | 	}
568 |
569 | 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
570 | 		radeon_uvd_note_usage(rdev);
571 |
572 | 	mutex_lock(&vm->mutex);
573 | 	r = radeon_bo_vm_update_pte(parser, vm);
574 | 	if (r) {
575 | 		goto out;
576 | 	}
577 |
578 | 	r = radeon_cs_sync_rings(parser);
579 | 	if (r) {
580 | 		if (r != -ERESTARTSYS)
581 | 			DRM_ERROR("Failed to sync rings: %i\n", r);
582 | 		goto out;
583 | 	}
584 |
585 | 	if ((rdev->family >= CHIP_TAHITI) &&
586 | 	    (parser->chunk_const_ib != NULL)) {
587 | 		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
588 | 	} else {
589 | 		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
590 | 	}
591 |
592 | out:
593 | 	mutex_unlock(&vm->mutex);
594 | 	return r;
595 | }
596 |
597 | static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
598 | {
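	/* note: -EDEADLK here appears to be radeon's signal that a GPU
	 * lockup was detected; reset the GPU and return -EAGAIN so
	 * userspace resubmits the CS. */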
599 | 	if (r == -EDEADLK) {
600 | 		r = radeon_gpu_reset(rdev);
601 | 		if (!r)
602 | 			r = -EAGAIN;
603 | 	}
604 | 	return r;
605 | }
606 |
607 | static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
608 | {
609 | 	struct radeon_cs_chunk *ib_chunk;
610 | 	struct radeon_vm *vm = NULL;
611 | 	int r;
612 |
613 | 	if (parser->chunk_ib == NULL)
614 | 		return 0;
615 |
616 | 	if (parser->cs_flags & RADEON_CS_USE_VM) {
617 | 		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
618 | 		vm = &fpriv->vm;
619 |
620 | 		if ((rdev->family >= CHIP_TAHITI) &&
621 | 		    (parser->chunk_const_ib != NULL)) {
622 | 			ib_chunk = parser->chunk_const_ib;
623 | 			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
624 | 				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
625 | 				return -EINVAL;
626 | 			}
627 | 			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
628 | 					  vm, ib_chunk->length_dw * 4);
629 | 			if (r) {
630 | 				DRM_ERROR("Failed to get const ib !\n");
631 | 				return r;
632 | 			}
633 | 			parser->const_ib.is_const_ib = true;
634 | 			parser->const_ib.length_dw = ib_chunk->length_dw;
635 | 			if (copy_from_user(parser->const_ib.ptr,
636 | 					   ib_chunk->user_ptr,
637 | 					   ib_chunk->length_dw * 4))
638 | 				return -EFAULT;
639 | 		}
640 |
641 | 		ib_chunk = parser->chunk_ib;
642 | 		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
643 | 			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
644 | 			return -EINVAL;
645 | 		}
646 | 	}
647 | 	ib_chunk = parser->chunk_ib;
648 |
649 | 	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
650 | 			  vm, ib_chunk->length_dw * 4);
651 | 	if (r) {
652 | 		DRM_ERROR("Failed to get ib !\n");
653 | 		return r;
654 | 	}
655 | 	parser->ib.length_dw = ib_chunk->length_dw;
656 | 	if (ib_chunk->kdata)
657 | 		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
658 | 	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
659 | 		return -EFAULT;
660 | 	return 0;
661 | }
662 |
663 | int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
664 | {
665 | 	struct radeon_device *rdev = dev->dev_private;
666 | 	struct radeon_cs_parser parser;
667 | 	int r;
668 |
669 | 	down_read(&rdev->exclusive_lock);
670 | 	if (!rdev->accel_working) {
671 | 		up_read(&rdev->exclusive_lock);
672 | 		return -EBUSY;
673 | 	}
674 | 	if (rdev->in_reset) {
675 | 		up_read(&rdev->exclusive_lock);
676 | 		r = radeon_gpu_reset(rdev);
677 | 		if (!r)
678 | 			r = -EAGAIN;
679 | 		return r;
680 | 	}
681 | 	/* initialize parser */
682 | 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
683 | 	parser.filp = filp;
684 | 	parser.rdev = rdev;
685 | 	parser.dev = rdev->dev;
686 | 	parser.family = rdev->family;
687 | 	r = radeon_cs_parser_init(&parser, data);
688 | 	if (r) {
689 | 		DRM_ERROR("Failed to initialize parser !\n");
690 | 		radeon_cs_parser_fini(&parser, r, false);
691 | 		up_read(&rdev->exclusive_lock);
692 | 		r = radeon_cs_handle_lockup(rdev, r);
693 | 		return r;
694 | 	}
695 |
696 | 	r = radeon_cs_ib_fill(rdev, &parser);
697 | 	if (!r) {
698 | 		r = radeon_cs_parser_relocs(&parser);
699 | 		if (r && r != -ERESTARTSYS)
700 | 			DRM_ERROR("Failed to parse relocation %d!\n", r);
701 | 	}
702 |
703 | 	if (r) {
704 | 		radeon_cs_parser_fini(&parser, r, false);
705 | 		up_read(&rdev->exclusive_lock);
706 | 		r = radeon_cs_handle_lockup(rdev, r);
707 | 		return r;
708 | 	}
709 |
710 | 	trace_radeon_cs(&parser);
711 |
712 | 	r = radeon_cs_ib_chunk(rdev, &parser);
713 | 	if (r) {
714 | 		goto out;
715 | 	}
716 | 	r = radeon_cs_ib_vm_chunk(rdev, &parser);
717 | 	if (r) {
718 | 		goto out;
719 | 	}
720 | out:
721 | 	radeon_cs_parser_fini(&parser, r, true);
722 | 	up_read(&rdev->exclusive_lock);
723 | 	r = radeon_cs_handle_lockup(rdev, r);
724 | 	return r;
725 | }
726 |
727 | /**
728 |  * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
729 |  * @p: parser structure holding parsing context.
730 |  * @pkt: where to store packet information
731 |  * @idx: dword index of the packet within the IB chunk
732 |  *
733 |  * Assume that chunk_ib_index is properly set. Will return -EINVAL
734 |  * if the packet is bigger than the remaining ib size or if the packet is unknown.
735 |  **/
736 | int radeon_cs_packet_parse(struct radeon_cs_parser *p,
737 | 			   struct radeon_cs_packet *pkt,
738 | 			   unsigned idx)
739 | {
740 | 	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
741 | 	struct radeon_device *rdev = p->rdev;
742 | 	uint32_t header;
743 | 	int ret = 0, i;
744 |
745 | 	if (idx >= ib_chunk->length_dw) {
746 | 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
747 | 			  idx, ib_chunk->length_dw);
748 | 		return -EINVAL;
749 | 	}
750 | 	header = radeon_get_ib_value(p, idx);
751 | 	pkt->idx = idx;
752 | 	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
753 | 	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
754 | 	pkt->one_reg_wr = 0;
755 | 	switch (pkt->type) {
756 | 	case RADEON_PACKET_TYPE0:
757 | 		if (rdev->family < CHIP_R600) {
758 | 			pkt->reg = R100_CP_PACKET0_GET_REG(header);
759 | 			pkt->one_reg_wr =
760 | 				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
761 | 		} else
762 | 			pkt->reg = R600_CP_PACKET0_GET_REG(header);
763 | 		break;
764 | 	case RADEON_PACKET_TYPE3:
765 | 		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
766 | 		break;
767 | 	case RADEON_PACKET_TYPE2:
768 | 		pkt->count = -1;
769 | 		break;
770 | 	default:
771 | 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
772 | 		ret = -EINVAL;
773 | 		goto dump_ib;
774 | 	}
775 | 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
776 | 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
777 | 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
778 | 		ret = -EINVAL;
779 | 		goto dump_ib;
780 | 	}
781 | 	return 0;
782 |
783 | dump_ib:
784 | 	for (i = 0; i < ib_chunk->length_dw; i++) {
785 | 		if (i == idx)
786 | 			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
787 | 		else
788 | 			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
789 | 	}
790 | 	return ret;
791 | }
792 |
793 | /**
794 |  * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
795 |  * @p: structure holding the parser context.
796 |  *
797 |  * Check if the next packet is a NOP relocation packet3.
798 |  **/
799 | bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
800 | {
801 | 	struct radeon_cs_packet p3reloc;
802 | 	int r;
803 |
804 | 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
805 | 	if (r)
806 | 		return false;
807 | 	if (p3reloc.type != RADEON_PACKET_TYPE3)
808 | 		return false;
809 | 	if (p3reloc.opcode != RADEON_PACKET3_NOP)
810 | 		return false;
811 | 	return true;
812 | }
813 |
814 | /**
815 |  * radeon_cs_dump_packet() - dump raw packet context
816 |  * @p: structure holding the parser context.
817 |  * @pkt: structure holding the packet.
818 |  *
819 |  * Used mostly for debugging and error reporting.
820 |  **/
821 | void radeon_cs_dump_packet(struct radeon_cs_parser *p,
822 | 			   struct radeon_cs_packet *pkt)
823 | {
824 | 	volatile uint32_t *ib;
825 | 	unsigned i;
826 | 	unsigned idx;
827 |
828 | 	ib = p->ib.ptr;
829 | 	idx = pkt->idx;
830 | 	for (i = 0; i <= (pkt->count + 1); i++, idx++)
831 | 		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
832 | }
833 |
834 | /**
835 |  * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
836 |  * @p: parser structure holding parsing context.
837 |  * @cs_reloc: where to store the reloc information
838 |  * @nomm: set for the legacy path without a memory manager; the GPU
839 |  *        offset is then taken directly from the relocation chunk
840 |  *        instead of from the validated BO
841 |  *
842 |  * Check if the next packet is a relocation packet3; if so, do bo validation
843 |  * and compute the GPU offset using the provided start.
844 |  **/
845 | int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
846 | 				struct radeon_bo_list **cs_reloc,
847 | 				int nomm)
848 | {
849 | 	struct radeon_cs_chunk *relocs_chunk;
850 | 	struct radeon_cs_packet p3reloc;
851 | 	unsigned idx;
852 | 	int r;
853 |
854 | 	if (p->chunk_relocs == NULL) {
855 | 		DRM_ERROR("No relocation chunk !\n");
856 | 		return -EINVAL;
857 | 	}
858 | 	*cs_reloc = NULL;
859 | 	relocs_chunk = p->chunk_relocs;
860 | 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
861 | 	if (r)
862 | 		return r;
863 | 	p->idx += p3reloc.count + 2;
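	/* note: a packet3 spans count + 2 dwords (one header dword plus
	 * count + 1 payload dwords), so this advances past the whole
	 * NOP packet. */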
864 | 	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
865 | 	    p3reloc.opcode != RADEON_PACKET3_NOP) {
866 | 		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
867 | 			  p3reloc.idx);
868 | 		radeon_cs_dump_packet(p, &p3reloc);
869 | 		return -EINVAL;
870 | 	}
871 | 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
872 | 	if (idx >= relocs_chunk->length_dw) {
873 | 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
874 | 			  idx, relocs_chunk->length_dw);
875 | 		radeon_cs_dump_packet(p, &p3reloc);
876 | 		return -EINVAL;
877 | 	}
878 | 	/* FIXME: we assume reloc size is 4 dwords */
879 | 	if (nomm) {
880 | 		*cs_reloc = p->relocs;
881 | 		(*cs_reloc)->gpu_offset =
882 | 			(u64)relocs_chunk->kdata[idx + 3] << 32;
883 | 		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
884 | 	} else
885 | 		*cs_reloc = &p->relocs[(idx / 4)];
886 | 	return 0;
887 | }