File: dev/pci/drm/amd/amdgpu/amdgpu_cs.c
Warning: line 944, column 9: Access to field 'rq' results in a dereference of a null pointer (loaded from field 'entity')
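A plausible reading of this report (reviewer note, not part of the analyzer output): amdgpu_cs_ib_fill() only assigns parser->entity while iterating AMDGPU_CHUNK_ID_IB chunks, so a submission that contains no IB chunk reaches line 944 with parser->entity still NULL, and loading ->rq dereferences that NULL pointer. The sketch below shows a minimal defensive guard under that assumption; the guard itself is hypothetical and is not the driver's actual fix.

	/* Hypothetical guard, for illustration only -- not the upstream fix. */
	if (!parser->entity)
		return -EINVAL;

	/* MM engine doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && ring->funcs->no_user_fence)
		return -EINVAL;

With such a guard, a zero-IB submission would fail cleanly with -EINVAL instead of dereferencing a NULL entity; rejecting submissions that carry no IB chunk earlier in parsing would work just as well.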
1 | /* | |||
2 | * Copyright 2008 Jerome Glisse. | |||
3 | * All Rights Reserved. | |||
4 | * | |||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
6 | * copy of this software and associated documentation files (the "Software"), | |||
7 | * to deal in the Software without restriction, including without limitation | |||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
9 | * and/or sell copies of the Software, and to permit persons to whom the | |||
10 | * Software is furnished to do so, subject to the following conditions: | |||
11 | * | |||
12 | * The above copyright notice and this permission notice (including the next | |||
13 | * paragraph) shall be included in all copies or substantial portions of the | |||
14 | * Software. | |||
15 | * | |||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
19 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |||
22 | * DEALINGS IN THE SOFTWARE. | |||
23 | * | |||
24 | * Authors: | |||
25 | * Jerome Glisse <glisse@freedesktop.org> | |||
26 | */ | |||
27 | ||||
28 | #include <linux/file.h> | |||
29 | #include <linux/pagemap.h> | |||
30 | #include <linux/sync_file.h> | |||
31 | #include <linux/dma-buf.h> | |||
32 | ||||
33 | #include <drm/amdgpu_drm.h> | |||
34 | #include <drm/drm_syncobj.h> | |||
35 | #include "amdgpu.h" | |||
36 | #include "amdgpu_trace.h" | |||
37 | #include "amdgpu_gmc.h" | |||
38 | #include "amdgpu_gem.h" | |||
39 | #include "amdgpu_ras.h" | |||
40 | ||||
41 | static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, | |||
42 | struct drm_amdgpu_cs_chunk_fence *data, | |||
43 | uint32_t *offset) | |||
44 | { | |||
45 | struct drm_gem_object *gobj; | |||
46 | struct amdgpu_bo *bo; | |||
47 | unsigned long size; | |||
48 | int r; | |||
49 | ||||
50 | gobj = drm_gem_object_lookup(p->filp, data->handle); | |||
51 | if (gobj == NULL((void *)0)) | |||
52 | return -EINVAL22; | |||
53 | ||||
54 | bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)({ const __typeof( ((struct amdgpu_bo *)0)->tbo.base ) *__mptr = ((gobj)); (struct amdgpu_bo *)( (char *)__mptr - __builtin_offsetof (struct amdgpu_bo, tbo.base) );})); | |||
55 | p->uf_entry.priority = 0; | |||
56 | p->uf_entry.tv.bo = &bo->tbo; | |||
57 | /* One for TTM and one for the CS job */ | |||
58 | p->uf_entry.tv.num_shared = 2; | |||
59 | ||||
60 | drm_gem_object_put(gobj); | |||
61 | ||||
62 | size = amdgpu_bo_size(bo); | |||
63 | if (size != PAGE_SIZE(1 << 12) || (data->offset + 8) > size) { | |||
64 | r = -EINVAL22; | |||
65 | goto error_unref; | |||
66 | } | |||
67 | ||||
68 | if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { | |||
69 | r = -EINVAL22; | |||
70 | goto error_unref; | |||
71 | } | |||
72 | ||||
73 | *offset = data->offset; | |||
74 | ||||
75 | return 0; | |||
76 | ||||
77 | error_unref: | |||
78 | amdgpu_bo_unref(&bo); | |||
79 | return r; | |||
80 | } | |||
81 | ||||
82 | static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, | |||
83 | struct drm_amdgpu_bo_list_in *data) | |||
84 | { | |||
85 | int r; | |||
86 | struct drm_amdgpu_bo_list_entry *info = NULL((void *)0); | |||
87 | ||||
88 | r = amdgpu_bo_create_list_entry_array(data, &info); | |||
89 | if (r) | |||
90 | return r; | |||
91 | ||||
92 | r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number, | |||
93 | &p->bo_list); | |||
94 | if (r) | |||
95 | goto error_free; | |||
96 | ||||
97 | kvfree(info); | |||
98 | return 0; | |||
99 | ||||
100 | error_free: | |||
101 | if (info) | |||
102 | kvfree(info); | |||
103 | ||||
104 | return r; | |||
105 | } | |||
106 | ||||
107 | static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) | |||
108 | { | |||
109 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | |||
110 | struct amdgpu_vm *vm = &fpriv->vm; | |||
111 | uint64_t *chunk_array_user; | |||
112 | uint64_t *chunk_array; | |||
113 | unsigned size, num_ibs = 0; | |||
114 | uint32_t uf_offset = 0; | |||
115 | int i; | |||
116 | int ret; | |||
117 | ||||
118 | if (cs->in.num_chunks == 0) | |||
119 | return 0; | |||
120 | ||||
121 | chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL(0x0001 | 0x0004)); | |||
122 | if (!chunk_array) | |||
123 | return -ENOMEM12; | |||
124 | ||||
125 | p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); | |||
126 | if (!p->ctx) { | |||
127 | ret = -EINVAL22; | |||
128 | goto free_chunk; | |||
129 | } | |||
130 | ||||
131 | mutex_lock(&p->ctx->lock)rw_enter_write(&p->ctx->lock); | |||
132 | ||||
133 | /* skip guilty context job */ | |||
134 | if (atomic_read(&p->ctx->guilty)({ typeof(*(&p->ctx->guilty)) __tmp = *(volatile typeof (*(&p->ctx->guilty)) *)&(*(&p->ctx->guilty )); membar_datadep_consumer(); __tmp; }) == 1) { | |||
135 | ret = -ECANCELED88; | |||
136 | goto free_chunk; | |||
137 | } | |||
138 | ||||
139 | /* get chunks */ | |||
140 | chunk_array_user = u64_to_user_ptr(cs->in.chunks)((void *)(uintptr_t)(cs->in.chunks)); | |||
141 | if (copy_from_user(chunk_array, chunk_array_user, | |||
142 | sizeof(uint64_t)*cs->in.num_chunks)) { | |||
143 | ret = -EFAULT14; | |||
144 | goto free_chunk; | |||
145 | } | |||
146 | ||||
147 | p->nchunks = cs->in.num_chunks; | |||
148 | p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), | |||
149 | GFP_KERNEL(0x0001 | 0x0004)); | |||
150 | if (!p->chunks) { | |||
151 | ret = -ENOMEM12; | |||
152 | goto free_chunk; | |||
153 | } | |||
154 | ||||
155 | for (i = 0; i < p->nchunks; i++) { | |||
156 | struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL((void *)0); | |||
157 | struct drm_amdgpu_cs_chunk user_chunk; | |||
158 | uint32_t __user *cdata; | |||
159 | ||||
160 | chunk_ptr = u64_to_user_ptr(chunk_array[i])((void *)(uintptr_t)(chunk_array[i])); | |||
161 | if (copy_from_user(&user_chunk, chunk_ptr, | |||
162 | sizeof(struct drm_amdgpu_cs_chunk))) { | |||
163 | ret = -EFAULT14; | |||
164 | i--; | |||
165 | goto free_partial_kdata; | |||
166 | } | |||
167 | p->chunks[i].chunk_id = user_chunk.chunk_id; | |||
168 | p->chunks[i].length_dw = user_chunk.length_dw; | |||
169 | ||||
170 | size = p->chunks[i].length_dw; | |||
171 | cdata = u64_to_user_ptr(user_chunk.chunk_data)((void *)(uintptr_t)(user_chunk.chunk_data)); | |||
172 | ||||
173 | p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL(0x0001 | 0x0004)); | |||
174 | if (p->chunks[i].kdata == NULL((void *)0)) { | |||
175 | ret = -ENOMEM12; | |||
176 | i--; | |||
177 | goto free_partial_kdata; | |||
178 | } | |||
179 | size *= sizeof(uint32_t); | |||
180 | if (copy_from_user(p->chunks[i].kdata, cdata, size)) { | |||
181 | ret = -EFAULT14; | |||
182 | goto free_partial_kdata; | |||
183 | } | |||
184 | ||||
185 | switch (p->chunks[i].chunk_id) { | |||
186 | case AMDGPU_CHUNK_ID_IB0x01: | |||
187 | ++num_ibs; | |||
188 | break; | |||
189 | ||||
190 | case AMDGPU_CHUNK_ID_FENCE0x02: | |||
191 | size = sizeof(struct drm_amdgpu_cs_chunk_fence); | |||
192 | if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { | |||
193 | ret = -EINVAL22; | |||
194 | goto free_partial_kdata; | |||
195 | } | |||
196 | ||||
197 | ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata, | |||
198 | &uf_offset); | |||
199 | if (ret) | |||
200 | goto free_partial_kdata; | |||
201 | ||||
202 | break; | |||
203 | ||||
204 | case AMDGPU_CHUNK_ID_BO_HANDLES0x06: | |||
205 | size = sizeof(struct drm_amdgpu_bo_list_in); | |||
206 | if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { | |||
207 | ret = -EINVAL22; | |||
208 | goto free_partial_kdata; | |||
209 | } | |||
210 | ||||
211 | ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata); | |||
212 | if (ret) | |||
213 | goto free_partial_kdata; | |||
214 | ||||
215 | break; | |||
216 | ||||
217 | case AMDGPU_CHUNK_ID_DEPENDENCIES0x03: | |||
218 | case AMDGPU_CHUNK_ID_SYNCOBJ_IN0x04: | |||
219 | case AMDGPU_CHUNK_ID_SYNCOBJ_OUT0x05: | |||
220 | case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES0x07: | |||
221 | case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT0x08: | |||
222 | case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL0x09: | |||
223 | break; | |||
224 | ||||
225 | default: | |||
226 | ret = -EINVAL22; | |||
227 | goto free_partial_kdata; | |||
228 | } | |||
229 | } | |||
230 | ||||
231 | ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm); | |||
232 | if (ret) | |||
233 | goto free_all_kdata; | |||
234 | ||||
235 | if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) { | |||
236 | ret = -ECANCELED88; | |||
237 | goto free_all_kdata; | |||
238 | } | |||
239 | ||||
240 | if (p->uf_entry.tv.bo) | |||
241 | p->job->uf_addr = uf_offset; | |||
242 | kfree(chunk_array); | |||
243 | ||||
244 | /* Use this opportunity to fill in task info for the vm */ | |||
245 | amdgpu_vm_set_task_info(vm); | |||
246 | ||||
247 | return 0; | |||
248 | ||||
249 | free_all_kdata: | |||
250 | i = p->nchunks - 1; | |||
251 | free_partial_kdata: | |||
252 | for (; i >= 0; i--) | |||
253 | kvfree(p->chunks[i].kdata); | |||
254 | kfree(p->chunks); | |||
255 | p->chunks = NULL((void *)0); | |||
256 | p->nchunks = 0; | |||
257 | free_chunk: | |||
258 | kfree(chunk_array); | |||
259 | ||||
260 | return ret; | |||
261 | } | |||
262 | ||||
263 | /* Convert microseconds to bytes. */ | |||
264 | static u64 us_to_bytes(struct amdgpu_device *adev, s64 us) | |||
265 | { | |||
266 | if (us <= 0 || !adev->mm_stats.log2_max_MBps) | |||
267 | return 0; | |||
268 | ||||
269 | /* Since accum_us is incremented by a million per second, just | |||
270 | * multiply it by the number of MB/s to get the number of bytes. | |||
271 | */ | |||
272 | return us << adev->mm_stats.log2_max_MBps; | |||
273 | } | |||
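/* Added illustration (not in the source): with log2_max_MBps == 6, one second
 * of accumulated time (1,000,000 us) converts to 1,000,000 << 6 = 64,000,000
 * bytes, i.e. roughly 64 MB/s worth of buffer moves are allowed.
 */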
274 | ||||
275 | static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes) | |||
276 | { | |||
277 | if (!adev->mm_stats.log2_max_MBps) | |||
278 | return 0; | |||
279 | ||||
280 | return bytes >> adev->mm_stats.log2_max_MBps; | |||
281 | } | |||
282 | ||||
283 | /* Returns how many bytes TTM can move right now. If no bytes can be moved, | |||
284 | * it returns 0. If it returns non-zero, it's OK to move at least one buffer, | |||
285 | * which means it can go over the threshold once. If that happens, the driver | |||
286 | * will be in debt and no other buffer migrations can be done until that debt | |||
287 | * is repaid. | |||
288 | * | |||
289 | * This approach allows moving a buffer of any size (it's important to allow | |||
290 | * that). | |||
291 | * | |||
292 | * The currency is simply time in microseconds and it increases as the clock | |||
293 | * ticks. The accumulated microseconds (us) are converted to bytes and | |||
294 | * returned. | |||
295 | */ | |||
296 | static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, | |||
297 | u64 *max_bytes, | |||
298 | u64 *max_vis_bytes) | |||
299 | { | |||
300 | s64 time_us, increment_us; | |||
301 | u64 free_vram, total_vram, used_vram; | |||
302 | struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM2); | |||
303 | /* Allow a maximum of 200 accumulated ms. This is basically per-IB | |||
304 | * throttling. | |||
305 | * | |||
306 | * It means that in order to get full max MBps, at least 5 IBs per | |||
307 | * second must be submitted and not more than 200ms apart from each | |||
308 | * other. | |||
309 | */ | |||
310 | const s64 us_upper_bound = 200000; | |||
311 | ||||
312 | if (!adev->mm_stats.log2_max_MBps) { | |||
313 | *max_bytes = 0; | |||
314 | *max_vis_bytes = 0; | |||
315 | return; | |||
316 | } | |||
317 | ||||
318 | total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size)({ typeof(*(&adev->vram_pin_size)) __tmp = *(volatile typeof (*(&adev->vram_pin_size)) *)&(*(&adev->vram_pin_size )); membar_datadep_consumer(); __tmp; }); | |||
319 | used_vram = amdgpu_vram_mgr_usage(vram_man); | |||
320 | free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram; | |||
321 | ||||
322 | spin_lock(&adev->mm_stats.lock)mtx_enter(&adev->mm_stats.lock); | |||
323 | ||||
324 | /* Increase the amount of accumulated us. */ | |||
325 | time_us = ktime_to_us(ktime_get()); | |||
326 | increment_us = time_us - adev->mm_stats.last_update_us; | |||
327 | adev->mm_stats.last_update_us = time_us; | |||
328 | adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,(((adev->mm_stats.accum_us + increment_us)<(us_upper_bound ))?(adev->mm_stats.accum_us + increment_us):(us_upper_bound )) | |||
329 | us_upper_bound)(((adev->mm_stats.accum_us + increment_us)<(us_upper_bound ))?(adev->mm_stats.accum_us + increment_us):(us_upper_bound )); | |||
330 | ||||
331 | /* This prevents the short period of low performance when the VRAM | |||
332 | * usage is low and the driver is in debt or doesn't have enough | |||
333 | * accumulated us to fill VRAM quickly. | |||
334 | * | |||
335 | * The situation can occur in these cases: | |||
336 | * - a lot of VRAM is freed by userspace | |||
337 | * - the presence of a big buffer causes a lot of evictions | |||
338 | * (solution: split buffers into smaller ones) | |||
339 | * | |||
340 | * If 128 MB or 1/8th of VRAM is free, start filling it now by setting | |||
341 | * accum_us to a positive number. | |||
342 | */ | |||
343 | if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) { | |||
344 | s64 min_us; | |||
345 | ||||
346 | /* Be more aggresive on dGPUs. Try to fill a portion of free | |||
347 | * VRAM now. | |||
348 | */ | |||
349 | if (!(adev->flags & AMD_IS_APU)) | |||
350 | min_us = bytes_to_us(adev, free_vram / 4); | |||
351 | else | |||
352 | min_us = 0; /* Reset accum_us on APUs. */ | |||
353 | ||||
354 | adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us)(((min_us)>(adev->mm_stats.accum_us))?(min_us):(adev-> mm_stats.accum_us)); | |||
355 | } | |||
356 | ||||
357 | /* This is set to 0 if the driver is in debt to disallow (optional) | |||
358 | * buffer moves. | |||
359 | */ | |||
360 | *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); | |||
361 | ||||
362 | /* Do the same for visible VRAM if half of it is free */ | |||
363 | if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) { | |||
364 | u64 total_vis_vram = adev->gmc.visible_vram_size; | |||
365 | u64 used_vis_vram = | |||
366 | amdgpu_vram_mgr_vis_usage(vram_man); | |||
367 | ||||
368 | if (used_vis_vram < total_vis_vram) { | |||
369 | u64 free_vis_vram = total_vis_vram - used_vis_vram; | |||
370 | adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +(((adev->mm_stats.accum_us_vis + increment_us)<(us_upper_bound ))?(adev->mm_stats.accum_us_vis + increment_us):(us_upper_bound )) | |||
371 | increment_us, us_upper_bound)(((adev->mm_stats.accum_us_vis + increment_us)<(us_upper_bound ))?(adev->mm_stats.accum_us_vis + increment_us):(us_upper_bound )); | |||
372 | ||||
373 | if (free_vis_vram >= total_vis_vram / 2) | |||
374 | adev->mm_stats.accum_us_vis = | |||
375 | max(bytes_to_us(adev, free_vis_vram / 2),(((bytes_to_us(adev, free_vis_vram / 2))>(adev->mm_stats .accum_us_vis))?(bytes_to_us(adev, free_vis_vram / 2)):(adev-> mm_stats.accum_us_vis)) | |||
376 | adev->mm_stats.accum_us_vis)(((bytes_to_us(adev, free_vis_vram / 2))>(adev->mm_stats .accum_us_vis))?(bytes_to_us(adev, free_vis_vram / 2)):(adev-> mm_stats.accum_us_vis)); | |||
377 | } | |||
378 | ||||
379 | *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis); | |||
380 | } else { | |||
381 | *max_vis_bytes = 0; | |||
382 | } | |||
383 | ||||
384 | spin_unlock(&adev->mm_stats.lock)mtx_leave(&adev->mm_stats.lock); | |||
385 | } | |||
386 | ||||
387 | /* Report how many bytes have really been moved for the last command | |||
388 | * submission. This can result in a debt that can stop buffer migrations | |||
389 | * temporarily. | |||
390 | */ | |||
391 | void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, | |||
392 | u64 num_vis_bytes) | |||
393 | { | |||
394 | spin_lock(&adev->mm_stats.lock)mtx_enter(&adev->mm_stats.lock); | |||
395 | adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); | |||
396 | adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes); | |||
397 | spin_unlock(&adev->mm_stats.lock)mtx_leave(&adev->mm_stats.lock); | |||
398 | } | |||
399 | ||||
400 | static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, | |||
401 | struct amdgpu_bo *bo) | |||
402 | { | |||
403 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | |||
404 | struct ttm_operation_ctx ctx = { | |||
405 | .interruptible = true1, | |||
406 | .no_wait_gpu = false0, | |||
407 | .resv = bo->tbo.base.resv, | |||
408 | .flags = 0 | |||
409 | }; | |||
410 | uint32_t domain; | |||
411 | int r; | |||
412 | ||||
413 | if (bo->pin_count) | |||
414 | return 0; | |||
415 | ||||
416 | /* Don't move this buffer if we have depleted our allowance | |||
417 | * to move it. Don't move anything if the threshold is zero. | |||
418 | */ | |||
419 | if (p->bytes_moved < p->bytes_moved_threshold && | |||
420 | (!bo->tbo.base.dma_buf || | |||
421 | list_empty(&bo->tbo.base.dma_buf->attachments))) { | |||
422 | if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && | |||
423 | (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED(1 << 0))) { | |||
424 | /* And don't move a CPU_ACCESS_REQUIRED BO to limited | |||
425 | * visible VRAM if we've depleted our allowance to do | |||
426 | * that. | |||
427 | */ | |||
428 | if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) | |||
429 | domain = bo->preferred_domains; | |||
430 | else | |||
431 | domain = bo->allowed_domains; | |||
432 | } else { | |||
433 | domain = bo->preferred_domains; | |||
434 | } | |||
435 | } else { | |||
436 | domain = bo->allowed_domains; | |||
437 | } | |||
438 | ||||
439 | retry: | |||
440 | amdgpu_bo_placement_from_domain(bo, domain); | |||
441 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); | |||
442 | ||||
443 | p->bytes_moved += ctx.bytes_moved; | |||
444 | if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && | |||
445 | amdgpu_bo_in_cpu_visible_vram(bo)) | |||
446 | p->bytes_moved_vis += ctx.bytes_moved; | |||
447 | ||||
448 | if (unlikely(r == -ENOMEM)__builtin_expect(!!(r == -12), 0) && domain != bo->allowed_domains) { | |||
449 | domain = bo->allowed_domains; | |||
450 | goto retry; | |||
451 | } | |||
452 | ||||
453 | return r; | |||
454 | } | |||
455 | ||||
456 | static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo) | |||
457 | { | |||
458 | struct amdgpu_cs_parser *p = param; | |||
459 | int r; | |||
460 | ||||
461 | r = amdgpu_cs_bo_validate(p, bo); | |||
462 | if (r) | |||
463 | return r; | |||
464 | ||||
465 | if (bo->shadow) | |||
466 | r = amdgpu_cs_bo_validate(p, bo->shadow); | |||
467 | ||||
468 | return r; | |||
469 | } | |||
470 | ||||
471 | static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, | |||
472 | struct list_head *validated) | |||
473 | { | |||
474 | struct ttm_operation_ctx ctx = { true1, false0 }; | |||
475 | struct amdgpu_bo_list_entry *lobj; | |||
476 | int r; | |||
477 | ||||
478 | list_for_each_entry(lobj, validated, tv.head)for (lobj = ({ const __typeof( ((__typeof(*lobj) *)0)->tv. head ) *__mptr = ((validated)->next); (__typeof(*lobj) *)( (char *)__mptr - __builtin_offsetof(__typeof(*lobj), tv.head ) );}); &lobj->tv.head != (validated); lobj = ({ const __typeof( ((__typeof(*lobj) *)0)->tv.head ) *__mptr = (lobj ->tv.head.next); (__typeof(*lobj) *)( (char *)__mptr - __builtin_offsetof (__typeof(*lobj), tv.head) );})) { | |||
479 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo); | |||
480 | struct mm_struct *usermm; | |||
481 | ||||
482 | #ifdef notyet | |||
483 | usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); | |||
484 | if (usermm && usermm != current->mm) | |||
485 | return -EPERM1; | |||
486 | #endif | |||
487 | ||||
488 | if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) && | |||
489 | lobj->user_invalidated && lobj->user_pages) { | |||
490 | amdgpu_bo_placement_from_domain(bo, | |||
491 | AMDGPU_GEM_DOMAIN_CPU0x1); | |||
492 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); | |||
493 | if (r) | |||
494 | return r; | |||
495 | ||||
496 | amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, | |||
497 | lobj->user_pages); | |||
498 | } | |||
499 | ||||
500 | r = amdgpu_cs_validate(p, bo); | |||
501 | if (r) | |||
502 | return r; | |||
503 | ||||
504 | kvfree(lobj->user_pages); | |||
505 | lobj->user_pages = NULL((void *)0); | |||
506 | } | |||
507 | return 0; | |||
508 | } | |||
509 | ||||
510 | static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, | |||
511 | union drm_amdgpu_cs *cs) | |||
512 | { | |||
513 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | |||
514 | struct amdgpu_vm *vm = &fpriv->vm; | |||
515 | struct amdgpu_bo_list_entry *e; | |||
516 | struct list_head duplicates; | |||
517 | struct amdgpu_bo *gds; | |||
518 | struct amdgpu_bo *gws; | |||
519 | struct amdgpu_bo *oa; | |||
520 | int r; | |||
521 | ||||
522 | INIT_LIST_HEAD(&p->validated); | |||
523 | ||||
524 | /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */ | |||
525 | if (cs->in.bo_list_handle) { | |||
526 | if (p->bo_list) | |||
527 | return -EINVAL22; | |||
528 | ||||
529 | r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle, | |||
530 | &p->bo_list); | |||
531 | if (r) | |||
532 | return r; | |||
533 | } else if (!p->bo_list) { | |||
534 | /* Create a empty bo_list when no handle is provided */ | |||
535 | r = amdgpu_bo_list_create(p->adev, p->filp, NULL((void *)0), 0, | |||
536 | &p->bo_list); | |||
537 | if (r) | |||
538 | return r; | |||
539 | } | |||
540 | ||||
541 | /* One for TTM and one for the CS job */ | |||
542 | amdgpu_bo_list_for_each_entry(e, p->bo_list)for (e = amdgpu_bo_list_array_entry(p->bo_list, 0); e != amdgpu_bo_list_array_entry (p->bo_list, (p->bo_list)->num_entries); ++e) | |||
543 | e->tv.num_shared = 2; | |||
544 | ||||
545 | amdgpu_bo_list_get_list(p->bo_list, &p->validated); | |||
546 | ||||
547 | INIT_LIST_HEAD(&duplicates); | |||
548 | amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); | |||
549 | ||||
550 | if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent) | |||
551 | list_add(&p->uf_entry.tv.head, &p->validated); | |||
552 | ||||
553 | /* Get userptr backing pages. If pages are updated after registered | |||
554 | * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do | |||
555 | * amdgpu_ttm_backend_bind() to flush and invalidate new pages | |||
556 | */ | |||
557 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list)for (e = amdgpu_bo_list_array_entry(p->bo_list, (p->bo_list )->first_userptr); e != amdgpu_bo_list_array_entry(p->bo_list , (p->bo_list)->num_entries); ++e) { | |||
558 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); | |||
559 | bool_Bool userpage_invalidated = false0; | |||
560 | int i; | |||
561 | ||||
562 | e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages, | |||
563 | sizeof(struct vm_page *), | |||
564 | GFP_KERNEL(0x0001 | 0x0004) | __GFP_ZERO0x0008); | |||
565 | if (!e->user_pages) { | |||
566 | DRM_ERROR("calloc failure\n")__drm_err("calloc failure\n"); | |||
567 | return -ENOMEM12; | |||
568 | } | |||
569 | ||||
570 | r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages); | |||
571 | if (r) { | |||
572 | kvfree(e->user_pages); | |||
573 | e->user_pages = NULL((void *)0); | |||
574 | return r; | |||
575 | } | |||
576 | ||||
577 | for (i = 0; i < bo->tbo.ttm->num_pages; i++) { | |||
578 | if (bo->tbo.ttm->pages[i] != e->user_pages[i]) { | |||
579 | userpage_invalidated = true1; | |||
580 | break; | |||
581 | } | |||
582 | } | |||
583 | e->user_invalidated = userpage_invalidated; | |||
584 | } | |||
585 | ||||
586 | r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true1, | |||
587 | &duplicates); | |||
588 | if (unlikely(r != 0)__builtin_expect(!!(r != 0), 0)) { | |||
589 | if (r != -ERESTARTSYS4) | |||
590 | DRM_ERROR("ttm_eu_reserve_buffers failed.\n")__drm_err("ttm_eu_reserve_buffers failed.\n"); | |||
591 | goto out; | |||
592 | } | |||
593 | ||||
594 | amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, | |||
595 | &p->bytes_moved_vis_threshold); | |||
596 | p->bytes_moved = 0; | |||
597 | p->bytes_moved_vis = 0; | |||
598 | ||||
599 | r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, | |||
600 | amdgpu_cs_validate, p); | |||
601 | if (r) { | |||
602 | DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n")__drm_err("amdgpu_vm_validate_pt_bos() failed.\n"); | |||
603 | goto error_validate; | |||
604 | } | |||
605 | ||||
606 | r = amdgpu_cs_list_validate(p, &duplicates); | |||
607 | if (r) | |||
608 | goto error_validate; | |||
609 | ||||
610 | r = amdgpu_cs_list_validate(p, &p->validated); | |||
611 | if (r) | |||
612 | goto error_validate; | |||
613 | ||||
614 | amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, | |||
615 | p->bytes_moved_vis); | |||
616 | ||||
617 | gds = p->bo_list->gds_obj; | |||
618 | gws = p->bo_list->gws_obj; | |||
619 | oa = p->bo_list->oa_obj; | |||
620 | ||||
621 | amdgpu_bo_list_for_each_entry(e, p->bo_list)for (e = amdgpu_bo_list_array_entry(p->bo_list, 0); e != amdgpu_bo_list_array_entry (p->bo_list, (p->bo_list)->num_entries); ++e) { | |||
622 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); | |||
623 | ||||
624 | /* Make sure we use the exclusive slot for shared BOs */ | |||
625 | if (bo->prime_shared_count) | |||
626 | e->tv.num_shared = 0; | |||
627 | e->bo_va = amdgpu_vm_bo_find(vm, bo); | |||
628 | } | |||
629 | ||||
630 | if (gds) { | |||
631 | p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT12; | |||
632 | p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT12; | |||
633 | } | |||
634 | if (gws) { | |||
635 | p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT12; | |||
636 | p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT12; | |||
637 | } | |||
638 | if (oa) { | |||
639 | p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT12; | |||
640 | p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT12; | |||
641 | } | |||
642 | ||||
643 | if (!r && p->uf_entry.tv.bo) { | |||
644 | struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo); | |||
645 | ||||
646 | r = amdgpu_ttm_alloc_gart(&uf->tbo); | |||
647 | p->job->uf_addr += amdgpu_bo_gpu_offset(uf); | |||
648 | } | |||
649 | ||||
650 | error_validate: | |||
651 | if (r) | |||
652 | ttm_eu_backoff_reservation(&p->ticket, &p->validated); | |||
653 | out: | |||
654 | return r; | |||
655 | } | |||
656 | ||||
657 | static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) | |||
658 | { | |||
659 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | |||
660 | struct amdgpu_bo_list_entry *e; | |||
661 | int r; | |||
662 | ||||
663 | list_for_each_entry(e, &p->validated, tv.head)for (e = ({ const __typeof( ((__typeof(*e) *)0)->tv.head ) *__mptr = ((&p->validated)->next); (__typeof(*e) * )( (char *)__mptr - __builtin_offsetof(__typeof(*e), tv.head) );}); &e->tv.head != (&p->validated); e = ({ const __typeof( ((__typeof(*e) *)0)->tv.head ) *__mptr = (e-> tv.head.next); (__typeof(*e) *)( (char *)__mptr - __builtin_offsetof (__typeof(*e), tv.head) );})) { | |||
664 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); | |||
665 | struct dma_resv *resv = bo->tbo.base.resv; | |||
666 | enum amdgpu_sync_mode sync_mode; | |||
667 | ||||
668 | sync_mode = amdgpu_bo_explicit_sync(bo) ? | |||
669 | AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER; | |||
670 | r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, | |||
671 | &fpriv->vm); | |||
672 | if (r) | |||
673 | return r; | |||
674 | } | |||
675 | return 0; | |||
676 | } | |||
677 | ||||
678 | /** | |||
679 | * cs_parser_fini() - clean parser states | |||
680 | * @parser: parser structure holding parsing context. | |||
681 | * @error: error number | |||
682 | * | |||
683 | * If error is set than unvalidate buffer, otherwise just free memory | |||
684 | * used by parsing context. | |||
685 | **/ | |||
686 | static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, | |||
687 | bool_Bool backoff) | |||
688 | { | |||
689 | unsigned i; | |||
690 | ||||
691 | if (error && backoff) | |||
692 | ttm_eu_backoff_reservation(&parser->ticket, | |||
693 | &parser->validated); | |||
694 | ||||
695 | for (i = 0; i < parser->num_post_deps; i++) { | |||
696 | drm_syncobj_put(parser->post_deps[i].syncobj); | |||
697 | kfree(parser->post_deps[i].chain); | |||
698 | } | |||
699 | kfree(parser->post_deps); | |||
700 | ||||
701 | dma_fence_put(parser->fence); | |||
702 | ||||
703 | if (parser->ctx) { | |||
704 | mutex_unlock(&parser->ctx->lock)rw_exit_write(&parser->ctx->lock); | |||
705 | amdgpu_ctx_put(parser->ctx); | |||
706 | } | |||
707 | if (parser->bo_list) | |||
708 | amdgpu_bo_list_put(parser->bo_list); | |||
709 | ||||
710 | for (i = 0; i < parser->nchunks; i++) | |||
711 | kvfree(parser->chunks[i].kdata); | |||
712 | kfree(parser->chunks); | |||
713 | if (parser->job) | |||
714 | amdgpu_job_free(parser->job); | |||
715 | if (parser->uf_entry.tv.bo) { | |||
716 | struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo); | |||
717 | ||||
718 | amdgpu_bo_unref(&uf); | |||
719 | } | |||
720 | } | |||
721 | ||||
722 | static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) | |||
723 | { | |||
724 | struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched)({ const __typeof( ((struct amdgpu_ring *)0)->sched ) *__mptr = ((p->entity->rq->sched)); (struct amdgpu_ring *)( (char *)__mptr - __builtin_offsetof(struct amdgpu_ring, sched ) );}); | |||
725 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | |||
726 | struct amdgpu_device *adev = p->adev; | |||
727 | struct amdgpu_vm *vm = &fpriv->vm; | |||
728 | struct amdgpu_bo_list_entry *e; | |||
729 | struct amdgpu_bo_va *bo_va; | |||
730 | struct amdgpu_bo *bo; | |||
731 | int r; | |||
732 | ||||
733 | /* Only for UVD/VCE VM emulation */ | |||
734 | if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) { | |||
735 | unsigned i, j; | |||
736 | ||||
737 | for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) { | |||
738 | struct drm_amdgpu_cs_chunk_ib *chunk_ib; | |||
739 | struct amdgpu_bo_va_mapping *m; | |||
740 | struct amdgpu_bo *aobj = NULL((void *)0); | |||
741 | struct amdgpu_cs_chunk *chunk; | |||
742 | uint64_t offset, va_start; | |||
743 | struct amdgpu_ib *ib; | |||
744 | uint8_t *kptr; | |||
745 | ||||
746 | chunk = &p->chunks[i]; | |||
747 | ib = &p->job->ibs[j]; | |||
748 | chunk_ib = chunk->kdata; | |||
749 | ||||
750 | if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB0x01) | |||
751 | continue; | |||
752 | ||||
753 | va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK0x0000ffffffffffffULL; | |||
754 | r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m); | |||
755 | if (r) { | |||
756 | DRM_ERROR("IB va_start is invalid\n")__drm_err("IB va_start is invalid\n"); | |||
757 | return r; | |||
758 | } | |||
759 | ||||
760 | if ((va_start + chunk_ib->ib_bytes) > | |||
761 | (m->last + 1) * AMDGPU_GPU_PAGE_SIZE4096) { | |||
762 | DRM_ERROR("IB va_start+ib_bytes is invalid\n")__drm_err("IB va_start+ib_bytes is invalid\n"); | |||
763 | return -EINVAL22; | |||
764 | } | |||
765 | ||||
766 | /* the IB should be reserved at this point */ | |||
767 | r = amdgpu_bo_kmap(aobj, (void **)&kptr); | |||
768 | if (r) { | |||
769 | return r; | |||
770 | } | |||
771 | ||||
772 | offset = m->start * AMDGPU_GPU_PAGE_SIZE4096; | |||
773 | kptr += va_start - offset; | |||
774 | ||||
775 | if (ring->funcs->parse_cs) { | |||
776 | memcpy(ib->ptr, kptr, chunk_ib->ib_bytes)__builtin_memcpy((ib->ptr), (kptr), (chunk_ib->ib_bytes )); | |||
777 | amdgpu_bo_kunmap(aobj); | |||
778 | ||||
779 | r = amdgpu_ring_parse_cs(ring, p, j)((ring)->funcs->parse_cs((p), (j))); | |||
780 | if (r) | |||
781 | return r; | |||
782 | } else { | |||
783 | ib->ptr = (uint32_t *)kptr; | |||
784 | r = amdgpu_ring_patch_cs_in_place(ring, p, j)((ring)->funcs->patch_cs_in_place((p), (j))); | |||
785 | amdgpu_bo_kunmap(aobj); | |||
786 | if (r) | |||
787 | return r; | |||
788 | } | |||
789 | ||||
790 | j++; | |||
791 | } | |||
792 | } | |||
793 | ||||
794 | if (!p->job->vm) | |||
795 | return amdgpu_cs_sync_rings(p); | |||
796 | ||||
797 | ||||
798 | r = amdgpu_vm_clear_freed(adev, vm, NULL((void *)0)); | |||
799 | if (r) | |||
800 | return r; | |||
801 | ||||
802 | r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false0); | |||
803 | if (r) | |||
804 | return r; | |||
805 | ||||
806 | r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update); | |||
807 | if (r) | |||
808 | return r; | |||
809 | ||||
810 | if (amdgpu_mcbp || amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2))) { | |||
811 | bo_va = fpriv->csa_va; | |||
812 | BUG_ON(!bo_va)((!(!bo_va)) ? (void)0 : __assert("diagnostic ", "/usr/src/sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c" , 812, "!(!bo_va)")); | |||
813 | r = amdgpu_vm_bo_update(adev, bo_va, false0); | |||
814 | if (r) | |||
815 | return r; | |||
816 | ||||
817 | r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update); | |||
818 | if (r) | |||
819 | return r; | |||
820 | } | |||
821 | ||||
822 | amdgpu_bo_list_for_each_entry(e, p->bo_list)for (e = amdgpu_bo_list_array_entry(p->bo_list, 0); e != amdgpu_bo_list_array_entry (p->bo_list, (p->bo_list)->num_entries); ++e) { | |||
823 | /* ignore duplicates */ | |||
824 | bo = ttm_to_amdgpu_bo(e->tv.bo); | |||
825 | if (!bo) | |||
826 | continue; | |||
827 | ||||
828 | bo_va = e->bo_va; | |||
829 | if (bo_va == NULL((void *)0)) | |||
830 | continue; | |||
831 | ||||
832 | r = amdgpu_vm_bo_update(adev, bo_va, false0); | |||
833 | if (r) | |||
834 | return r; | |||
835 | ||||
836 | r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update); | |||
837 | if (r) | |||
838 | return r; | |||
839 | } | |||
840 | ||||
841 | r = amdgpu_vm_handle_moved(adev, vm); | |||
842 | if (r) | |||
843 | return r; | |||
844 | ||||
845 | r = amdgpu_vm_update_pdes(adev, vm, false0); | |||
846 | if (r) | |||
847 | return r; | |||
848 | ||||
849 | r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update); | |||
850 | if (r) | |||
851 | return r; | |||
852 | ||||
853 | p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); | |||
854 | ||||
855 | if (amdgpu_vm_debug) { | |||
856 | /* Invalidate all BOs to test for userspace bugs */ | |||
857 | amdgpu_bo_list_for_each_entry(e, p->bo_list)for (e = amdgpu_bo_list_array_entry(p->bo_list, 0); e != amdgpu_bo_list_array_entry (p->bo_list, (p->bo_list)->num_entries); ++e) { | |||
858 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); | |||
859 | ||||
860 | /* ignore duplicates */ | |||
861 | if (!bo) | |||
862 | continue; | |||
863 | ||||
864 | amdgpu_vm_bo_invalidate(adev, bo, false0); | |||
865 | } | |||
866 | } | |||
867 | ||||
868 | return amdgpu_cs_sync_rings(p); | |||
869 | } | |||
870 | ||||
871 | static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
872 | struct amdgpu_cs_parser *parser) | |||
873 | { | |||
874 | struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; | |||
875 | struct amdgpu_vm *vm = &fpriv->vm; | |||
876 | int r, ce_preempt = 0, de_preempt = 0; | |||
877 | struct amdgpu_ring *ring; | |||
878 | int i, j; | |||
879 | ||||
880 | for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
881 | struct amdgpu_cs_chunk *chunk; | |||
882 | struct amdgpu_ib *ib; | |||
883 | struct drm_amdgpu_cs_chunk_ib *chunk_ib; | |||
884 | struct drm_sched_entity *entity; | |||
885 | ||||
886 | chunk = &parser->chunks[i]; | |||
887 | ib = &parser->job->ibs[j]; | |||
888 | chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata; | |||
889 | ||||
890 | if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB0x01) | |||
891 | continue; | |||
892 | ||||
893 | if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX0 && | |||
894 | (amdgpu_mcbp || amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2)))) { | |||
895 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT(1<<2)) { | |||
896 | if (chunk_ib->flags & AMDGPU_IB_FLAG_CE(1<<0)) | |||
897 | ce_preempt++; | |||
898 | else | |||
899 | de_preempt++; | |||
900 | } | |||
901 | ||||
902 | /* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */ | |||
903 | if (ce_preempt > 1 || de_preempt > 1) | |||
904 | return -EINVAL22; | |||
905 | } | |||
906 | ||||
907 | r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type, | |||
908 | chunk_ib->ip_instance, chunk_ib->ring, | |||
909 | &entity); | |||
910 | if (r) | |||
911 | return r; | |||
912 | ||||
913 | if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE(1<<1)) | |||
914 | parser->job->preamble_status |= | |||
915 | AMDGPU_PREAMBLE_IB_PRESENT(1 << 0); | |||
916 | ||||
917 | if (parser->entity && parser->entity != entity) | |||
918 | return -EINVAL22; | |||
919 | ||||
920 | /* Return if there is no run queue associated with this entity. | |||
921 | * Possibly because of disabled HW IP*/ | |||
922 | if (entity->rq == NULL((void *)0)) | |||
923 | return -EINVAL22; | |||
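/* Reviewer note (not in the source): this rq check only executes for
 * AMDGPU_CHUNK_ID_IB chunks; a submission without any IB chunk skips it
 * and leaves parser->entity untouched.
 */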
924 | ||||
925 | parser->entity = entity; | |||
926 | ||||
927 | ring = to_amdgpu_ring(entity->rq->sched)({ const __typeof( ((struct amdgpu_ring *)0)->sched ) *__mptr = ((entity->rq->sched)); (struct amdgpu_ring *)( (char *)__mptr - __builtin_offsetof(struct amdgpu_ring, sched) );} ); | |||
928 | r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ? | |||
929 | chunk_ib->ib_bytes : 0, | |||
930 | AMDGPU_IB_POOL_DELAYED, ib); | |||
931 | if (r) { | |||
932 | DRM_ERROR("Failed to get ib !\n")__drm_err("Failed to get ib !\n"); | |||
933 | return r; | |||
934 | } | |||
935 | ||||
936 | ib->gpu_addr = chunk_ib->va_start; | |||
937 | ib->length_dw = chunk_ib->ib_bytes / 4; | |||
938 | ib->flags = chunk_ib->flags; | |||
939 | ||||
940 | j++; | |||
941 | } | |||
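/* Reviewer annotation (not in the source): if the loop above never saw an
 * AMDGPU_CHUNK_ID_IB chunk, parser->entity was never assigned and is still
 * NULL here, which appears to be the path behind the warning on line 944
 * below; see the guard sketch under the warning at the top of this report.
 */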
942 | ||||
943 | /* MM engine doesn't support user fences */ | |||
944 | ring = to_amdgpu_ring(parser->entity->rq->sched);
945 | if (parser->job->uf_addr && ring->funcs->no_user_fence) | |||
946 | return -EINVAL22; | |||
947 | ||||
948 | return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity); | |||
949 | } | |||
950 | ||||
951 | static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, | |||
952 | struct amdgpu_cs_chunk *chunk) | |||
953 | { | |||
954 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | |||
955 | unsigned num_deps; | |||
956 | int i, r; | |||
957 | struct drm_amdgpu_cs_chunk_dep *deps; | |||
958 | ||||
959 | deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata; | |||
960 | num_deps = chunk->length_dw * 4 / | |||
961 | sizeof(struct drm_amdgpu_cs_chunk_dep); | |||
962 | ||||
963 | for (i = 0; i < num_deps; ++i) { | |||
964 | struct amdgpu_ctx *ctx; | |||
965 | struct drm_sched_entity *entity; | |||
966 | struct dma_fence *fence; | |||
967 | ||||
968 | ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id); | |||
969 | if (ctx == NULL((void *)0)) | |||
970 | return -EINVAL22; | |||
971 | ||||
972 | r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type, | |||
973 | deps[i].ip_instance, | |||
974 | deps[i].ring, &entity); | |||
975 | if (r) { | |||
976 | amdgpu_ctx_put(ctx); | |||
977 | return r; | |||
978 | } | |||
979 | ||||
980 | fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); | |||
981 | amdgpu_ctx_put(ctx); | |||
982 | ||||
983 | if (IS_ERR(fence)) | |||
984 | return PTR_ERR(fence); | |||
985 | else if (!fence) | |||
986 | continue; | |||
987 | ||||
988 | if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES0x07) { | |||
989 | struct drm_sched_fence *s_fence; | |||
990 | struct dma_fence *old = fence; | |||
991 | ||||
992 | s_fence = to_drm_sched_fence(fence); | |||
993 | fence = dma_fence_get(&s_fence->scheduled); | |||
994 | dma_fence_put(old); | |||
995 | } | |||
996 | ||||
997 | r = amdgpu_sync_fence(&p->job->sync, fence); | |||
998 | dma_fence_put(fence); | |||
999 | if (r) | |||
1000 | return r; | |||
1001 | } | |||
1002 | return 0; | |||
1003 | } | |||
1004 | ||||
1005 | static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, | |||
1006 | uint32_t handle, u64 point, | |||
1007 | u64 flags) | |||
1008 | { | |||
1009 | struct dma_fence *fence; | |||
1010 | int r; | |||
1011 | ||||
1012 | r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence); | |||
1013 | if (r) { | |||
1014 | DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",__drm_err("syncobj %u failed to find fence @ %llu (%d)!\n", handle , point, r) | |||
1015 | handle, point, r)__drm_err("syncobj %u failed to find fence @ %llu (%d)!\n", handle , point, r); | |||
1016 | return r; | |||
1017 | } | |||
1018 | ||||
1019 | r = amdgpu_sync_fence(&p->job->sync, fence); | |||
1020 | dma_fence_put(fence); | |||
1021 | ||||
1022 | return r; | |||
1023 | } | |||
1024 | ||||
1025 | static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p, | |||
1026 | struct amdgpu_cs_chunk *chunk) | |||
1027 | { | |||
1028 | struct drm_amdgpu_cs_chunk_sem *deps; | |||
1029 | unsigned num_deps; | |||
1030 | int i, r; | |||
1031 | ||||
1032 | deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; | |||
1033 | num_deps = chunk->length_dw * 4 / | |||
1034 | sizeof(struct drm_amdgpu_cs_chunk_sem); | |||
1035 | for (i = 0; i < num_deps; ++i) { | |||
1036 | r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle, | |||
1037 | 0, 0); | |||
1038 | if (r) | |||
1039 | return r; | |||
1040 | } | |||
1041 | ||||
1042 | return 0; | |||
1043 | } | |||
1044 | ||||
1045 | ||||
1046 | static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p, | |||
1047 | struct amdgpu_cs_chunk *chunk) | |||
1048 | { | |||
1049 | struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; | |||
1050 | unsigned num_deps; | |||
1051 | int i, r; | |||
1052 | ||||
1053 | syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata; | |||
1054 | num_deps = chunk->length_dw * 4 / | |||
1055 | sizeof(struct drm_amdgpu_cs_chunk_syncobj); | |||
1056 | for (i = 0; i < num_deps; ++i) { | |||
1057 | r = amdgpu_syncobj_lookup_and_add_to_sync(p, | |||
1058 | syncobj_deps[i].handle, | |||
1059 | syncobj_deps[i].point, | |||
1060 | syncobj_deps[i].flags); | |||
1061 | if (r) | |||
1062 | return r; | |||
1063 | } | |||
1064 | ||||
1065 | return 0; | |||
1066 | } | |||
1067 | ||||
1068 | static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, | |||
1069 | struct amdgpu_cs_chunk *chunk) | |||
1070 | { | |||
1071 | struct drm_amdgpu_cs_chunk_sem *deps; | |||
1072 | unsigned num_deps; | |||
1073 | int i; | |||
1074 | ||||
1075 | deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; | |||
1076 | num_deps = chunk->length_dw * 4 / | |||
1077 | sizeof(struct drm_amdgpu_cs_chunk_sem); | |||
1078 | ||||
1079 | if (p->post_deps) | |||
1080 | return -EINVAL22; | |||
1081 | ||||
1082 | p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), | |||
1083 | GFP_KERNEL(0x0001 | 0x0004)); | |||
1084 | p->num_post_deps = 0; | |||
1085 | ||||
1086 | if (!p->post_deps) | |||
1087 | return -ENOMEM12; | |||
1088 | ||||
1089 | ||||
1090 | for (i = 0; i < num_deps; ++i) { | |||
1091 | p->post_deps[i].syncobj = | |||
1092 | drm_syncobj_find(p->filp, deps[i].handle); | |||
1093 | if (!p->post_deps[i].syncobj) | |||
1094 | return -EINVAL22; | |||
1095 | p->post_deps[i].chain = NULL((void *)0); | |||
1096 | p->post_deps[i].point = 0; | |||
1097 | p->num_post_deps++; | |||
1098 | } | |||
1099 | ||||
1100 | return 0; | |||
1101 | } | |||
1102 | ||||
1103 | ||||
1104 | static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, | |||
1105 | struct amdgpu_cs_chunk *chunk) | |||
1106 | { | |||
1107 | struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; | |||
1108 | unsigned num_deps; | |||
1109 | int i; | |||
1110 | ||||
1111 | syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata; | |||
1112 | num_deps = chunk->length_dw * 4 / | |||
1113 | sizeof(struct drm_amdgpu_cs_chunk_syncobj); | |||
1114 | ||||
1115 | if (p->post_deps) | |||
1116 | return -EINVAL22; | |||
1117 | ||||
1118 | p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), | |||
1119 | GFP_KERNEL(0x0001 | 0x0004)); | |||
1120 | p->num_post_deps = 0; | |||
1121 | ||||
1122 | if (!p->post_deps) | |||
1123 | return -ENOMEM12; | |||
1124 | ||||
1125 | for (i = 0; i < num_deps; ++i) { | |||
1126 | struct amdgpu_cs_post_dep *dep = &p->post_deps[i]; | |||
1127 | ||||
1128 | dep->chain = NULL((void *)0); | |||
1129 | if (syncobj_deps[i].point) { | |||
1130 | dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL(0x0001 | 0x0004)); | |||
1131 | if (!dep->chain) | |||
1132 | return -ENOMEM12; | |||
1133 | } | |||
1134 | ||||
1135 | dep->syncobj = drm_syncobj_find(p->filp, | |||
1136 | syncobj_deps[i].handle); | |||
1137 | if (!dep->syncobj) { | |||
1138 | kfree(dep->chain); | |||
1139 | return -EINVAL22; | |||
1140 | } | |||
1141 | dep->point = syncobj_deps[i].point; | |||
1142 | p->num_post_deps++; | |||
1143 | } | |||
1144 | ||||
1145 | return 0; | |||
1146 | } | |||
1147 | ||||
1148 | static int amdgpu_cs_dependencies(struct amdgpu_device *adev, | |||
1149 | struct amdgpu_cs_parser *p) | |||
1150 | { | |||
1151 | int i, r; | |||
1152 | ||||
1153 | for (i = 0; i < p->nchunks; ++i) { | |||
1154 | struct amdgpu_cs_chunk *chunk; | |||
1155 | ||||
1156 | chunk = &p->chunks[i]; | |||
1157 | ||||
1158 | switch (chunk->chunk_id) { | |||
1159 | case AMDGPU_CHUNK_ID_DEPENDENCIES0x03: | |||
1160 | case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES0x07: | |||
1161 | r = amdgpu_cs_process_fence_dep(p, chunk); | |||
1162 | if (r) | |||
1163 | return r; | |||
1164 | break; | |||
1165 | case AMDGPU_CHUNK_ID_SYNCOBJ_IN0x04: | |||
1166 | r = amdgpu_cs_process_syncobj_in_dep(p, chunk); | |||
1167 | if (r) | |||
1168 | return r; | |||
1169 | break; | |||
1170 | case AMDGPU_CHUNK_ID_SYNCOBJ_OUT0x05: | |||
1171 | r = amdgpu_cs_process_syncobj_out_dep(p, chunk); | |||
1172 | if (r) | |||
1173 | return r; | |||
1174 | break; | |||
1175 | case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT0x08: | |||
1176 | r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk); | |||
1177 | if (r) | |||
1178 | return r; | |||
1179 | break; | |||
1180 | case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL0x09: | |||
1181 | r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk); | |||
1182 | if (r) | |||
1183 | return r; | |||
1184 | break; | |||
1185 | } | |||
1186 | } | |||
1187 | ||||
1188 | return 0; | |||
1189 | } | |||
1190 | ||||
1191 | static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) | |||
1192 | { | |||
1193 | int i; | |||
1194 | ||||
1195 | for (i = 0; i < p->num_post_deps; ++i) { | |||
1196 | if (p->post_deps[i].chain && p->post_deps[i].point) { | |||
1197 | drm_syncobj_add_point(p->post_deps[i].syncobj, | |||
1198 | p->post_deps[i].chain, | |||
1199 | p->fence, p->post_deps[i].point); | |||
1200 | p->post_deps[i].chain = NULL((void *)0); | |||
1201 | } else { | |||
1202 | drm_syncobj_replace_fence(p->post_deps[i].syncobj, | |||
1203 | p->fence); | |||
1204 | } | |||
1205 | } | |||
1206 | } | |||
1207 | ||||
1208 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | |||
1209 | union drm_amdgpu_cs *cs) | |||
1210 | { | |||
1211 | struct amdgpu_fpriv *fpriv = p->filp->driver_priv; | |||
1212 | struct drm_sched_entity *entity = p->entity; | |||
1213 | struct amdgpu_bo_list_entry *e; | |||
1214 | struct amdgpu_job *job; | |||
1215 | uint64_t seq; | |||
1216 | int r; | |||
1217 | ||||
1218 | job = p->job; | |||
1219 | p->job = NULL((void *)0); | |||
1220 | ||||
1221 | r = drm_sched_job_init(&job->base, entity, &fpriv->vm); | |||
1222 | if (r) | |||
1223 | goto error_unlock; | |||
1224 | ||||
1225 | /* No memory allocation is allowed while holding the notifier lock. | |||
1226 | * The lock is held until amdgpu_cs_submit is finished and fence is | |||
1227 | * added to BOs. | |||
1228 | */ | |||
1229 | mutex_lock(&p->adev->notifier_lock)rw_enter_write(&p->adev->notifier_lock); | |||
1230 | ||||
1231 | /* If userptr are invalidated after amdgpu_cs_parser_bos(), return | |||
1232 | * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl. | |||
1233 | */ | |||
1234 | amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list)for (e = amdgpu_bo_list_array_entry(p->bo_list, (p->bo_list )->first_userptr); e != amdgpu_bo_list_array_entry(p->bo_list , (p->bo_list)->num_entries); ++e) { | |||
1235 | struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); | |||
1236 | ||||
1237 | r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); | |||
1238 | } | |||
1239 | if (r) { | |||
1240 | r = -EAGAIN35; | |||
1241 | goto error_abort; | |||
1242 | } | |||
1243 | ||||
1244 | p->fence = dma_fence_get(&job->base.s_fence->finished); | |||
1245 | ||||
1246 | amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq); | |||
1247 | amdgpu_cs_post_dependencies(p); | |||
1248 | ||||
1249 | if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT(1 << 0)) && | |||
1250 | !p->ctx->preamble_presented) { | |||
1251 | job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST(1 << 1); | |||
1252 | p->ctx->preamble_presented = true1; | |||
1253 | } | |||
1254 | ||||
1255 | cs->out.handle = seq; | |||
1256 | job->uf_sequence = seq; | |||
1257 | ||||
1258 | amdgpu_job_free_resources(job); | |||
1259 | ||||
1260 | trace_amdgpu_cs_ioctl(job); | |||
1261 | amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket); | |||
1262 | drm_sched_entity_push_job(&job->base, entity); | |||
1263 | ||||
1264 | amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); | |||
1265 | ||||
1266 | ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); | |||
1267 | mutex_unlock(&p->adev->notifier_lock)rw_exit_write(&p->adev->notifier_lock); | |||
1268 | ||||
1269 | return 0; | |||
1270 | ||||
1271 | error_abort: | |||
1272 | drm_sched_job_cleanup(&job->base); | |||
1273 | mutex_unlock(&p->adev->notifier_lock)rw_exit_write(&p->adev->notifier_lock); | |||
1274 | ||||
1275 | error_unlock: | |||
1276 | amdgpu_job_free(job); | |||
1277 | return r; | |||
1278 | } | |||
1279 | ||||
1280 | static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser) | |||
1281 | { | |||
1282 | int i; | |||
1283 | ||||
1284 | if (!trace_amdgpu_cs_enabled()) | |||
1285 | return; | |||
1286 | ||||
1287 | for (i = 0; i < parser->job->num_ibs; i++) | |||
1288 | trace_amdgpu_cs(parser, i); | |||
1289 | } | |||
1290 | ||||
1291 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
1292 | { | |||
1293 | struct amdgpu_device *adev = drm_to_adev(dev); | |||
1294 | union drm_amdgpu_cs *cs = data; | |||
1295 | struct amdgpu_cs_parser parser = {};
1296 | bool_Bool reserved_buffers = false0; | |||
1297 | int r; | |||
1298 | ||||
1299 | if (amdgpu_ras_intr_triggered()) | |||
1300 | return -EHWPOISON5; | |||
1301 | ||||
1302 | if (!adev->accel_working) | |||
1303 | return -EBUSY16; | |||
1304 | ||||
1305 | parser.adev = adev; | |||
1306 | parser.filp = filp; | |||
1307 | ||||
1308 | r = amdgpu_cs_parser_init(&parser, data); | |||
1309 | if (r) {
1310 | if (printk_ratelimit()1) | |||
1311 | DRM_ERROR("Failed to initialize parser %d!\n", r)__drm_err("Failed to initialize parser %d!\n", r); | |||
1312 | goto out; | |||
1313 | } | |||
1314 | ||||
1315 | r = amdgpu_cs_ib_fill(adev, &parser); | |||
1316 | if (r) | |||
1317 | goto out; | |||
1318 | ||||
1319 | r = amdgpu_cs_dependencies(adev, &parser); | |||
1320 | if (r) { | |||
1321 | DRM_ERROR("Failed in the dependencies handling %d!\n", r)__drm_err("Failed in the dependencies handling %d!\n", r); | |||
1322 | goto out; | |||
1323 | } | |||
1324 | ||||
1325 | r = amdgpu_cs_parser_bos(&parser, data); | |||
1326 | if (r) { | |||
1327 | if (r == -ENOMEM12) | |||
1328 | DRM_ERROR("Not enough memory for command submission!\n")__drm_err("Not enough memory for command submission!\n"); | |||
1329 | else if (r != -ERESTARTSYS4 && r != -EAGAIN35) | |||
1330 | DRM_ERROR("Failed to process the buffer list %d!\n", r)__drm_err("Failed to process the buffer list %d!\n", r); | |||
1331 | goto out; | |||
1332 | } | |||
1333 | ||||
1334 | reserved_buffers = true1; | |||
1335 | ||||
1336 | trace_amdgpu_cs_ibs(&parser); | |||
1337 | ||||
1338 | r = amdgpu_cs_vm_handling(&parser); | |||
1339 | if (r) | |||
1340 | goto out; | |||
1341 | ||||
1342 | r = amdgpu_cs_submit(&parser, cs); | |||
1343 | ||||
1344 | out: | |||
1345 | amdgpu_cs_parser_fini(&parser, r, reserved_buffers); | |||
1346 | ||||
1347 | return r; | |||
1348 | } | |||
1349 | ||||
1350 | /** | |||
1351 | * amdgpu_cs_wait_ioctl - wait for a command submission to finish | |||
1352 | * | |||
1353 | * @dev: drm device | |||
1354 | * @data: data from userspace | |||
1355 | * @filp: file private | |||
1356 | * | |||
1357 | * Wait for the command submission identified by handle to finish. | |||
1358 | */ | |||
1359 | int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, | |||
1360 | struct drm_file *filp) | |||
1361 | { | |||
1362 | union drm_amdgpu_wait_cs *wait = data; | |||
1363 | unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); | |||
1364 | struct drm_sched_entity *entity; | |||
1365 | struct amdgpu_ctx *ctx; | |||
1366 | struct dma_fence *fence; | |||
1367 | long r; | |||
1368 | ||||
1369 | ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); | |||
1370 | if (ctx == NULL((void *)0)) | |||
1371 | return -EINVAL22; | |||
1372 | ||||
1373 | r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance, | |||
1374 | wait->in.ring, &entity); | |||
1375 | if (r) { | |||
1376 | amdgpu_ctx_put(ctx); | |||
1377 | return r; | |||
1378 | } | |||
1379 | ||||
1380 | fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle); | |||
1381 | if (IS_ERR(fence)) | |||
1382 | r = PTR_ERR(fence); | |||
1383 | else if (fence) { | |||
1384 | r = dma_fence_wait_timeout(fence, true1, timeout); | |||
1385 | if (r > 0 && fence->error) | |||
1386 | r = fence->error; | |||
1387 | dma_fence_put(fence); | |||
1388 | } else | |||
1389 | r = 1; | |||
1390 | ||||
1391 | amdgpu_ctx_put(ctx); | |||
1392 | if (r < 0) | |||
1393 | return r; | |||
1394 | ||||
1395 | memset(wait, 0, sizeof(*wait))__builtin_memset((wait), (0), (sizeof(*wait))); | |||
1396 | wait->out.status = (r == 0); | |||
1397 | ||||
1398 | return 0; | |||
1399 | } | |||
1400 | ||||
1401 | /** | |||
1402 | * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence | |||
1403 | * | |||
1404 | * @adev: amdgpu device | |||
1405 | * @filp: file private | |||
1406 | * @user: drm_amdgpu_fence copied from user space | |||
1407 | */ | |||
1408 | static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev, | |||
1409 | struct drm_file *filp, | |||
1410 | struct drm_amdgpu_fence *user) | |||
1411 | { | |||
1412 | struct drm_sched_entity *entity; | |||
1413 | struct amdgpu_ctx *ctx; | |||
1414 | struct dma_fence *fence; | |||
1415 | int r; | |||
1416 | ||||
1417 | ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id); | |||
1418 | if (ctx == NULL((void *)0)) | |||
1419 | return ERR_PTR(-EINVAL22); | |||
1420 | ||||
1421 | r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance, | |||
1422 | user->ring, &entity); | |||
1423 | if (r) { | |||
1424 | amdgpu_ctx_put(ctx); | |||
1425 | return ERR_PTR(r); | |||
1426 | } | |||
1427 | ||||
1428 | fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no); | |||
1429 | amdgpu_ctx_put(ctx); | |||
1430 | ||||
1431 | return fence; | |||
1432 | } | |||
1433 | 
1434 | int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1435 | 				    struct drm_file *filp)
1436 | {
1437 | 	struct amdgpu_device *adev = drm_to_adev(dev);
1438 | 	union drm_amdgpu_fence_to_handle *info = data;
1439 | 	struct dma_fence *fence;
1440 | 	struct drm_syncobj *syncobj;
1441 | 	struct sync_file *sync_file;
1442 | 	int fd, r;
1443 | 
1444 | 	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1445 | 	if (IS_ERR(fence))
1446 | 		return PTR_ERR(fence);
1447 | 
1448 | 	if (!fence)
1449 | 		fence = dma_fence_get_stub();
1450 | 
1451 | 	switch (info->in.what) {
1452 | 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1453 | 		r = drm_syncobj_create(&syncobj, 0, fence);
1454 | 		dma_fence_put(fence);
1455 | 		if (r)
1456 | 			return r;
1457 | 		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1458 | 		drm_syncobj_put(syncobj);
1459 | 		return r;
1460 | 
1461 | 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1462 | 		r = drm_syncobj_create(&syncobj, 0, fence);
1463 | 		dma_fence_put(fence);
1464 | 		if (r)
1465 | 			return r;
1466 | 		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1467 | 		drm_syncobj_put(syncobj);
1468 | 		return r;
1469 | 
1470 | 	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1471 | 		fd = get_unused_fd_flags(O_CLOEXEC);
1472 | 		if (fd < 0) {
1473 | 			dma_fence_put(fence);
1474 | 			return fd;
1475 | 		}
1476 | 
1477 | 		sync_file = sync_file_create(fence);
1478 | 		dma_fence_put(fence);
1479 | 		if (!sync_file) {
1480 | 			put_unused_fd(fd);
1481 | 			return -ENOMEM;
1482 | 		}
1483 | 
1484 | 		fd_install(fd, sync_file->file);
1485 | 		info->out.handle = fd;
1486 | 		return 0;
1487 | 
1488 | 	default:
1489 | 		return -EINVAL;
1490 | 	}
1491 | }
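To make the sync_file path above concrete, here is a minimal userspace sketch. It again assumes libdrm's drmIoctl() and the union drm_amdgpu_fence_to_handle layout from amdgpu_drm.h; ctx_id and seq_no are hypothetical values from earlier ioctls. The returned descriptor is a sync_file fd that becomes readable (POLLIN) once the fence signals, so it can be handed to poll()/epoll or to another driver.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <libdrm/amdgpu_drm.h>	/* include path may differ per install */

/* Convert a submission fence into a pollable sync_file fd, or -1 on error. */
static int example_fence_to_sync_file(int fd, uint32_t ctx_id, uint64_t seq_no)
{
	union drm_amdgpu_fence_to_handle fth;

	memset(&fth, 0, sizeof(fth));
	fth.in.fence.ctx_id = ctx_id;
	fth.in.fence.ip_type = AMDGPU_HW_IP_GFX;	/* must match the submission */
	fth.in.fence.ip_instance = 0;
	fth.in.fence.ring = 0;
	fth.in.fence.seq_no = seq_no;
	fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth))
		return -1;

	return (int)fth.out.handle;	/* sync_file fd; close() when done */
}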
1492 | 
1493 | /**
1494 |  * amdgpu_cs_wait_all_fences - wait on all fences to signal
1495 |  *
1496 |  * @adev: amdgpu device
1497 |  * @filp: file private
1498 |  * @wait: wait parameters
1499 |  * @fences: array of drm_amdgpu_fence
1500 |  */
1501 | static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1502 | 				     struct drm_file *filp,
1503 | 				     union drm_amdgpu_wait_fences *wait,
1504 | 				     struct drm_amdgpu_fence *fences)
1505 | {
1506 | 	uint32_t fence_count = wait->in.fence_count;
1507 | 	unsigned int i;
1508 | 	long r = 1;
1509 | 
1510 | 	for (i = 0; i < fence_count; i++) {
1511 | 		struct dma_fence *fence;
1512 | 		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1513 | 
1514 | 		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1515 | 		if (IS_ERR(fence))
1516 | 			return PTR_ERR(fence);
1517 | 		else if (!fence)
1518 | 			continue;
1519 | 
1520 | 		r = dma_fence_wait_timeout(fence, true, timeout);
1521 | 		/* check the fence error before dropping our reference */
1522 | 		if (r > 0 && fence->error)
1523 | 			r = fence->error;
1524 | 		dma_fence_put(fence);
1525 | 
1526 | 		if (r < 0)
1527 | 			return r;
1528 | 		if (r == 0)
1529 | 			break;
1530 | 	}
1531 | 
1532 | 	memset(wait, 0, sizeof(*wait));
1533 | 	wait->out.status = (r > 0);
1534 | 
1535 | 	return 0;
1536 | }
1537 | 
1538 | /**
1539 |  * amdgpu_cs_wait_any_fence - wait on any fence to signal
1540 |  *
1541 |  * @adev: amdgpu device
1542 |  * @filp: file private
1543 |  * @wait: wait parameters
1544 |  * @fences: array of drm_amdgpu_fence
1545 |  */
1546 | static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1547 | 				    struct drm_file *filp,
1548 | 				    union drm_amdgpu_wait_fences *wait,
1549 | 				    struct drm_amdgpu_fence *fences)
1550 | {
1551 | 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1552 | 	uint32_t fence_count = wait->in.fence_count;
1553 | 	uint32_t first = ~0;
1554 | 	struct dma_fence **array;
1555 | 	unsigned int i;
1556 | 	long r;
1557 | 
1558 | 	/* Prepare the fence array */
1559 | 	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1560 | 
1561 | 	if (array == NULL)
1562 | 		return -ENOMEM;
1563 | 
1564 | 	for (i = 0; i < fence_count; i++) {
1565 | 		struct dma_fence *fence;
1566 | 
1567 | 		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1568 | 		if (IS_ERR(fence)) {
1569 | 			r = PTR_ERR(fence);
1570 | 			goto err_free_fence_array;
1571 | 		} else if (fence) {
1572 | 			array[i] = fence;
1573 | 		} else { /* NULL, the fence has been already signaled */
1574 | 			r = 1;
1575 | 			first = i;
1576 | 			goto out;
1577 | 		}
1578 | 	}
1579 | 
1580 | 	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1581 | 				       &first);
1582 | 	if (r < 0)
1583 | 		goto err_free_fence_array;
1584 | 
1585 | out:
1586 | 	memset(wait, 0, sizeof(*wait));
1587 | 	wait->out.status = (r > 0);
1588 | 	wait->out.first_signaled = first;
1589 | 
1590 | 	if (first < fence_count && array[first])
1591 | 		r = array[first]->error;
1592 | 	else
1593 | 		r = 0;
1594 | 
1595 | err_free_fence_array:
1596 | 	for (i = 0; i < fence_count; i++)
1597 | 		dma_fence_put(array[i]);
1598 | 	kfree(array);
1599 | 
1600 | 	return r;
1601 | }
1602 | 
1603 | /**
1604 |  * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1605 |  *
1606 |  * @dev: drm device
1607 |  * @data: data from userspace
1608 |  * @filp: file private
1609 |  */
1610 | int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1611 | 				struct drm_file *filp)
1612 | {
1613 | 	struct amdgpu_device *adev = drm_to_adev(dev);
1614 | 	union drm_amdgpu_wait_fences *wait = data;
1615 | 	uint32_t fence_count = wait->in.fence_count;
1616 | 	struct drm_amdgpu_fence *fences_user;
1617 | 	struct drm_amdgpu_fence *fences;
1618 | 	int r;
1619 | 
1620 | 	/* Get the fences from userspace */
1621 | 	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1622 | 			       GFP_KERNEL);
1623 | 	if (fences == NULL)
1624 | 		return -ENOMEM;
1625 | 
1626 | 	fences_user = u64_to_user_ptr(wait->in.fences);
1627 | 	if (copy_from_user(fences, fences_user,
1628 | 			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
1629 | 		r = -EFAULT;
1630 | 		goto err_free_fences;
1631 | 	}
1632 | 
1633 | 	if (wait->in.wait_all)
1634 | 		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1635 | 	else
1636 | 		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1637 | 
1638 | err_free_fences:
1639 | 	kfree(fences);
1640 | 
1641 | 	return r;
1642 | }
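From userspace this entry point is reached through DRM_IOCTL_AMDGPU_WAIT_FENCES. A minimal sketch follows, again assuming libdrm's drmIoctl() and the structures from amdgpu_drm.h; the fence array would be filled in from earlier submissions. With wait_all set to 0 the call returns as soon as any fence signals and reports its index in first_signaled; with wait_all set to 1 it waits for all of them.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <libdrm/amdgpu_drm.h>	/* include path may differ per install */

/* Wait for any of "count" fences; returns 1 if one signaled, 0 on timeout,
 * -1 on ioctl error.  *first receives the index of the signaled fence. */
static int example_wait_any(int fd, const struct drm_amdgpu_fence *fences,
			    uint32_t count, uint64_t timeout_ns, uint32_t *first)
{
	union drm_amdgpu_wait_fences args;

	memset(&args, 0, sizeof(args));
	args.in.fences = (uintptr_t)fences;	/* user pointer passed as a u64 */
	args.in.fence_count = count;
	args.in.wait_all = 0;			/* 0 = wait for any, 1 = wait for all */
	args.in.timeout_ns = timeout_ns;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args))
		return -1;

	*first = args.out.first_signaled;
	return args.out.status;			/* non-zero once the wait succeeded */
}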
1643 | 
1644 | /**
1645 |  * amdgpu_cs_find_mapping - find the BO and VA mapping for a GPU address
1646 |  *
1647 |  * @parser: command submission parser context
1648 |  * @addr: VM address
1649 |  * @bo: resulting BO of the mapping found
1650 |  * @map: resulting VA mapping found
1651 |  *
1652 |  * Search the buffer objects of the command submission for the given virtual
1653 |  * memory address; returns 0 and fills @bo and @map, or a negative error code.
1654 |  */
1655 | int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1656 | 			   uint64_t addr, struct amdgpu_bo **bo,
1657 | 			   struct amdgpu_bo_va_mapping **map)
1658 | {
1659 | 	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1660 | 	struct ttm_operation_ctx ctx = { false, false };
1661 | 	struct amdgpu_vm *vm = &fpriv->vm;
1662 | 	struct amdgpu_bo_va_mapping *mapping;
1663 | 	int r;
1664 | 
1665 | 	addr /= AMDGPU_GPU_PAGE_SIZE;
1666 | 
1667 | 	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1668 | 	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1669 | 		return -EINVAL;
1670 | 
1671 | 	*bo = mapping->bo_va->base.bo;
1672 | 	*map = mapping;
1673 | 
1674 | 	/* Double check that the BO is reserved by this CS */
1675 | 	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1676 | 		return -EINVAL;
1677 | 
1678 | 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1679 | 		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1680 | 		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1681 | 		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1682 | 		if (r)
1683 | 			return r;
1684 | 	}
1685 | 
1686 | 	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1687 | }
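In-kernel callers such as the UVD/VCE IB parsers use this helper to translate a GPU virtual address found in an indirect buffer back to its backing BO. The sketch below is illustrative only: the function name is hypothetical, "parser" and "addr" are assumed to come from an IB parse pass, and the offset arithmetic follows the pattern those parsers use.

/* Illustrative sketch of a typical caller of amdgpu_cs_find_mapping(). */
static int example_resolve_ib_address(struct amdgpu_cs_parser *parser,
				      uint64_t addr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t offset;
	int r;

	r = amdgpu_cs_find_mapping(parser, addr, &bo, &mapping);
	if (r)
		return r;

	/* Byte offset of the address relative to the start of the mapping */
	offset = addr - mapping->start * AMDGPU_GPU_PAGE_SIZE;

	/* ... range-check offset against the mapping and patch the IB ... */
	return 0;
}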