File: | dev/pci/drm/radeon/radeon_vce.c |
Warning: | line 97, column 9 Access to field 'size' results in a dereference of a null pointer (loaded from field 'vce_fw') |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* | |||
2 | * Copyright 2013 Advanced Micro Devices, Inc. | |||
3 | * All Rights Reserved. | |||
4 | * | |||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
6 | * copy of this software and associated documentation files (the | |||
7 | * "Software"), to deal in the Software without restriction, including | |||
8 | * without limitation the rights to use, copy, modify, merge, publish, | |||
9 | * distribute, sub license, and/or sell copies of the Software, and to | |||
10 | * permit persons to whom the Software is furnished to do so, subject to | |||
11 | * the following conditions: | |||
12 | * | |||
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |||
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |||
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |||
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |||
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
20 | * | |||
21 | * The above copyright notice and this permission notice (including the | |||
22 | * next paragraph) shall be included in all copies or substantial portions | |||
23 | * of the Software. | |||
24 | * | |||
25 | * Authors: Christian König <christian.koenig@amd.com> | |||
26 | */ | |||
27 | ||||
28 | #include <linux/firmware.h> | |||
29 | #include <linux/module.h> | |||
30 | ||||
31 | #include <drm/drm.h> | |||
32 | ||||
33 | #include "radeon.h" | |||
34 | #include "radeon_asic.h" | |||
35 | #include "sid.h" | |||
36 | ||||
37 | /* 1 second timeout */ | |||
38 | #define VCE_IDLE_TIMEOUT_MS1000 1000 | |||
39 | ||||
40 | /* Firmware Names */ | |||
41 | #define FIRMWARE_TAHITI"radeon/TAHITI_vce.bin" "radeon/TAHITI_vce.bin" | |||
42 | #define FIRMWARE_BONAIRE"radeon/BONAIRE_vce.bin" "radeon/BONAIRE_vce.bin" | |||
43 | ||||
44 | MODULE_FIRMWARE(FIRMWARE_TAHITI); | |||
45 | MODULE_FIRMWARE(FIRMWARE_BONAIRE); | |||
46 | ||||
47 | static void radeon_vce_idle_work_handler(struct work_struct *work); | |||
48 | ||||
49 | /** | |||
50 | * radeon_vce_init - allocate memory, load vce firmware | |||
51 | * | |||
52 | * @rdev: radeon_device pointer | |||
53 | * | |||
54 | * First step to get VCE online, allocate memory and load the firmware | |||
55 | */ | |||
56 | int radeon_vce_init(struct radeon_device *rdev) | |||
57 | { | |||
58 | static const char *fw_version = "[ATI LIB=VCEFW,"; | |||
59 | static const char *fb_version = "[ATI LIB=VCEFWSTATS,"; | |||
60 | unsigned long size; | |||
61 | const char *fw_name, *c; | |||
62 | uint8_t start, mid, end; | |||
63 | int i, r; | |||
64 | ||||
65 | INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler); | |||
| ||||
66 | ||||
67 | switch (rdev->family) { | |||
68 | case CHIP_TAHITI: | |||
69 | case CHIP_PITCAIRN: | |||
70 | case CHIP_VERDE: | |||
71 | case CHIP_OLAND: | |||
72 | case CHIP_ARUBA: | |||
73 | fw_name = FIRMWARE_TAHITI"radeon/TAHITI_vce.bin"; | |||
74 | break; | |||
75 | ||||
76 | case CHIP_BONAIRE: | |||
77 | case CHIP_KAVERI: | |||
78 | case CHIP_KABINI: | |||
79 | case CHIP_HAWAII: | |||
80 | case CHIP_MULLINS: | |||
81 | fw_name = FIRMWARE_BONAIRE"radeon/BONAIRE_vce.bin"; | |||
82 | break; | |||
83 | ||||
84 | default: | |||
85 | return -EINVAL22; | |||
86 | } | |||
87 | ||||
88 | r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev); | |||
89 | if (r) { | |||
90 | dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",printf("drm:pid%d:%s *ERROR* " "radeon_vce: Can't load firmware \"%s\"\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , fw_name ) | |||
91 | fw_name)printf("drm:pid%d:%s *ERROR* " "radeon_vce: Can't load firmware \"%s\"\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , fw_name ); | |||
92 | return r; | |||
93 | } | |||
94 | ||||
95 | /* search for firmware version */ | |||
96 | ||||
97 | size = rdev->vce_fw->size - strlen(fw_version) - 9; | |||
| ||||
98 | c = rdev->vce_fw->data; | |||
99 | for (;size > 0; --size, ++c) | |||
100 | if (strncmp(c, fw_version, strlen(fw_version)) == 0) | |||
101 | break; | |||
102 | ||||
103 | if (size == 0) | |||
104 | return -EINVAL22; | |||
105 | ||||
106 | c += strlen(fw_version); | |||
107 | #ifdef notyet | |||
108 | if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3) | |||
109 | return -EINVAL22; | |||
110 | #else | |||
111 | #if 0 | |||
112 | int x; | |||
113 | printf("\n%s A\n", __func__); | |||
114 | for (x = 0; x < 16; x++) { | |||
115 | printf("%02x ", c[x]); | |||
116 | } | |||
117 | #endif | |||
118 | if (c[2] != '.') { | |||
119 | printf("%s %s bad start value\n", rdev->self.dv_xname, __func__); | |||
120 | return -EINVAL22; | |||
121 | } | |||
122 | start = (10 * (c[0] - '0')) + (c[1] - '0'); | |||
123 | c += 3; | |||
124 | ||||
125 | if (c[1] != '.') { | |||
126 | printf("%s %s bad mid value\n", rdev->self.dv_xname, __func__); | |||
127 | return -EINVAL22; | |||
128 | } | |||
129 | mid = c[0] - '0'; | |||
130 | c += 2; | |||
131 | ||||
132 | if (c[1] != ']') { | |||
133 | printf("%s %s bad end value\n", rdev->self.dv_xname, __func__); | |||
134 | return -EINVAL22; | |||
135 | } | |||
136 | end = c[0] - '0'; | |||
137 | #endif | |||
138 | ||||
139 | /* search for feedback version */ | |||
140 | ||||
141 | size = rdev->vce_fw->size - strlen(fb_version) - 3; | |||
142 | c = rdev->vce_fw->data; | |||
143 | for (;size > 0; --size, ++c) | |||
144 | if (strncmp(c, fb_version, strlen(fb_version)) == 0) | |||
145 | break; | |||
146 | ||||
147 | if (size == 0) | |||
148 | return -EINVAL22; | |||
149 | ||||
150 | c += strlen(fb_version); | |||
151 | #ifdef notyet | |||
152 | if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1) | |||
153 | return -EINVAL22; | |||
154 | #else | |||
155 | #if 0 | |||
156 | printf("\n%s B\n", __func__); | |||
157 | for (x = 0; x < 16; x++) { | |||
158 | printf("%02x ", c[x]); | |||
159 | } | |||
160 | printf("\n"); | |||
161 | #endif | |||
162 | if (c[2] != ']') { | |||
163 | printf("%s %s bad fb_version value\n", rdev->self.dv_xname, __func__); | |||
164 | return -EINVAL22; | |||
165 | } | |||
166 | rdev->vce.fb_version = (10 * (c[0] - '0')) + (c[1] - '0'); | |||
167 | #endif | |||
168 | ||||
169 | DRM_INFO("Found VCE firmware/feedback version %d.%d.%d / %d!\n",printk("\0016" "[" "drm" "] " "Found VCE firmware/feedback version %d.%d.%d / %d!\n" , start, mid, end, rdev->vce.fb_version) | |||
170 | start, mid, end, rdev->vce.fb_version)printk("\0016" "[" "drm" "] " "Found VCE firmware/feedback version %d.%d.%d / %d!\n" , start, mid, end, rdev->vce.fb_version); | |||
171 | ||||
172 | rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8); | |||
173 | ||||
174 | /* we can only work with this fw version for now */ | |||
175 | if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) && | |||
176 | (rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) && | |||
177 | (rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8)))) | |||
178 | return -EINVAL22; | |||
179 | ||||
180 | /* allocate firmware, stack and heap BO */ | |||
181 | ||||
182 | if (rdev->family < CHIP_BONAIRE) | |||
183 | size = vce_v1_0_bo_size(rdev); | |||
184 | else | |||
185 | size = vce_v2_0_bo_size(rdev); | |||
186 | r = radeon_bo_create(rdev, size, PAGE_SIZE(1 << 12), true1, | |||
187 | RADEON_GEM_DOMAIN_VRAM0x4, 0, NULL((void *)0), NULL((void *)0), | |||
188 | &rdev->vce.vcpu_bo); | |||
189 | if (r) { | |||
190 | dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r)printf("drm:pid%d:%s *ERROR* " "(%d) failed to allocate VCE bo\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , r); | |||
191 | return r; | |||
192 | } | |||
193 | ||||
194 | r = radeon_bo_reserve(rdev->vce.vcpu_bo, false0); | |||
195 | if (r) { | |||
196 | radeon_bo_unref(&rdev->vce.vcpu_bo); | |||
197 | dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r)printf("drm:pid%d:%s *ERROR* " "(%d) failed to reserve VCE bo\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , r); | |||
198 | return r; | |||
199 | } | |||
200 | ||||
201 | r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM0x4, | |||
202 | &rdev->vce.gpu_addr); | |||
203 | radeon_bo_unreserve(rdev->vce.vcpu_bo); | |||
204 | if (r) { | |||
205 | radeon_bo_unref(&rdev->vce.vcpu_bo); | |||
206 | dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r)printf("drm:pid%d:%s *ERROR* " "(%d) VCE bo pin failed\n", ({ struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , r); | |||
207 | return r; | |||
208 | } | |||
209 | ||||
210 | for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i) { | |||
211 | atomic_set(&rdev->vce.handles[i], 0)({ typeof(*(&rdev->vce.handles[i])) __tmp = ((0)); *(volatile typeof(*(&rdev->vce.handles[i])) *)&(*(&rdev-> vce.handles[i])) = __tmp; __tmp; }); | |||
212 | rdev->vce.filp[i] = NULL((void *)0); | |||
213 | } | |||
214 | ||||
215 | return 0; | |||
216 | } | |||
217 | ||||
218 | /** | |||
219 | * radeon_vce_fini - free memory | |||
220 | * | |||
221 | * @rdev: radeon_device pointer | |||
222 | * | |||
223 | * Last step on VCE teardown, free firmware memory | |||
224 | */ | |||
225 | void radeon_vce_fini(struct radeon_device *rdev) | |||
226 | { | |||
227 | if (rdev->vce.vcpu_bo == NULL((void *)0)) | |||
228 | return; | |||
229 | ||||
230 | radeon_bo_unref(&rdev->vce.vcpu_bo); | |||
231 | ||||
232 | release_firmware(rdev->vce_fw); | |||
233 | } | |||
234 | ||||
235 | /** | |||
236 | * radeon_vce_suspend - unpin VCE fw memory | |||
237 | * | |||
238 | * @rdev: radeon_device pointer | |||
239 | * | |||
240 | */ | |||
241 | int radeon_vce_suspend(struct radeon_device *rdev) | |||
242 | { | |||
243 | int i; | |||
244 | ||||
245 | if (rdev->vce.vcpu_bo == NULL((void *)0)) | |||
246 | return 0; | |||
247 | ||||
248 | for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i) | |||
249 | if (atomic_read(&rdev->vce.handles[i])({ typeof(*(&rdev->vce.handles[i])) __tmp = *(volatile typeof(*(&rdev->vce.handles[i])) *)&(*(&rdev-> vce.handles[i])); membar_datadep_consumer(); __tmp; })) | |||
250 | break; | |||
251 | ||||
252 | if (i == RADEON_MAX_VCE_HANDLES16) | |||
253 | return 0; | |||
254 | ||||
255 | /* TODO: suspending running encoding sessions isn't supported */ | |||
256 | return -EINVAL22; | |||
257 | } | |||
258 | ||||
259 | /** | |||
260 | * radeon_vce_resume - pin VCE fw memory | |||
261 | * | |||
262 | * @rdev: radeon_device pointer | |||
263 | * | |||
264 | */ | |||
265 | int radeon_vce_resume(struct radeon_device *rdev) | |||
266 | { | |||
267 | void *cpu_addr; | |||
268 | int r; | |||
269 | ||||
270 | if (rdev->vce.vcpu_bo == NULL((void *)0)) | |||
271 | return -EINVAL22; | |||
272 | ||||
273 | r = radeon_bo_reserve(rdev->vce.vcpu_bo, false0); | |||
274 | if (r) { | |||
275 | dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r)printf("drm:pid%d:%s *ERROR* " "(%d) failed to reserve VCE bo\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , r); | |||
276 | return r; | |||
277 | } | |||
278 | ||||
279 | r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr); | |||
280 | if (r) { | |||
281 | radeon_bo_unreserve(rdev->vce.vcpu_bo); | |||
282 | dev_err(rdev->dev, "(%d) VCE map failed\n", r)printf("drm:pid%d:%s *ERROR* " "(%d) VCE map failed\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci ) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci; })->ci_curproc->p_p->ps_pid, __func__ , r); | |||
283 | return r; | |||
284 | } | |||
285 | ||||
286 | memset(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo))__builtin_memset((cpu_addr), (0), (radeon_bo_size(rdev->vce .vcpu_bo))); | |||
287 | if (rdev->family < CHIP_BONAIRE) | |||
288 | r = vce_v1_0_load_fw(rdev, cpu_addr); | |||
289 | else | |||
290 | memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size)__builtin_memcpy((cpu_addr), (rdev->vce_fw->data), (rdev ->vce_fw->size)); | |||
291 | ||||
292 | radeon_bo_kunmap(rdev->vce.vcpu_bo); | |||
293 | ||||
294 | radeon_bo_unreserve(rdev->vce.vcpu_bo); | |||
295 | ||||
296 | return r; | |||
297 | } | |||
298 | ||||
299 | /** | |||
300 | * radeon_vce_idle_work_handler - power off VCE | |||
301 | * | |||
302 | * @work: pointer to work structure | |||
303 | * | |||
304 | * power of VCE when it's not used any more | |||
305 | */ | |||
306 | static void radeon_vce_idle_work_handler(struct work_struct *work) | |||
307 | { | |||
308 | struct radeon_device *rdev = | |||
309 | container_of(work, struct radeon_device, vce.idle_work.work)({ const __typeof( ((struct radeon_device *)0)->vce.idle_work .work ) *__mptr = (work); (struct radeon_device *)( (char *)__mptr - __builtin_offsetof(struct radeon_device, vce.idle_work.work ) );}); | |||
310 | ||||
311 | if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX6) == 0) && | |||
312 | (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX7) == 0)) { | |||
313 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { | |||
314 | radeon_dpm_enable_vce(rdev, false0); | |||
315 | } else { | |||
316 | radeon_set_vce_clocks(rdev, 0, 0)(rdev)->asic->pm.set_vce_clocks((rdev), (0), (0)); | |||
317 | } | |||
318 | } else { | |||
319 | schedule_delayed_work(&rdev->vce.idle_work, | |||
320 | msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)(((uint64_t)(1000)) * hz / 1000)); | |||
321 | } | |||
322 | } | |||
323 | ||||
324 | /** | |||
325 | * radeon_vce_note_usage - power up VCE | |||
326 | * | |||
327 | * @rdev: radeon_device pointer | |||
328 | * | |||
329 | * Make sure VCE is powerd up when we want to use it | |||
330 | */ | |||
331 | void radeon_vce_note_usage(struct radeon_device *rdev) | |||
332 | { | |||
333 | bool_Bool streams_changed = false0; | |||
334 | bool_Bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work); | |||
335 | set_clocks &= schedule_delayed_work(&rdev->vce.idle_work, | |||
336 | msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)(((uint64_t)(1000)) * hz / 1000)); | |||
337 | ||||
338 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { | |||
339 | /* XXX figure out if the streams changed */ | |||
340 | streams_changed = false0; | |||
341 | } | |||
342 | ||||
343 | if (set_clocks || streams_changed) { | |||
344 | if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { | |||
345 | radeon_dpm_enable_vce(rdev, true1); | |||
346 | } else { | |||
347 | radeon_set_vce_clocks(rdev, 53300, 40000)(rdev)->asic->pm.set_vce_clocks((rdev), (53300), (40000 )); | |||
348 | } | |||
349 | } | |||
350 | } | |||
351 | ||||
352 | /** | |||
353 | * radeon_vce_free_handles - free still open VCE handles | |||
354 | * | |||
355 | * @rdev: radeon_device pointer | |||
356 | * @filp: drm file pointer | |||
357 | * | |||
358 | * Close all VCE handles still open by this file pointer | |||
359 | */ | |||
360 | void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp) | |||
361 | { | |||
362 | int i, r; | |||
363 | for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i) { | |||
364 | uint32_t handle = atomic_read(&rdev->vce.handles[i])({ typeof(*(&rdev->vce.handles[i])) __tmp = *(volatile typeof(*(&rdev->vce.handles[i])) *)&(*(&rdev-> vce.handles[i])); membar_datadep_consumer(); __tmp; }); | |||
365 | if (!handle || rdev->vce.filp[i] != filp) | |||
366 | continue; | |||
367 | ||||
368 | radeon_vce_note_usage(rdev); | |||
369 | ||||
370 | r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX6, | |||
371 | handle, NULL((void *)0)); | |||
372 | if (r) | |||
373 | DRM_ERROR("Error destroying VCE handle (%d)!\n", r)__drm_err("Error destroying VCE handle (%d)!\n", r); | |||
374 | ||||
375 | rdev->vce.filp[i] = NULL((void *)0); | |||
376 | atomic_set(&rdev->vce.handles[i], 0)({ typeof(*(&rdev->vce.handles[i])) __tmp = ((0)); *(volatile typeof(*(&rdev->vce.handles[i])) *)&(*(&rdev-> vce.handles[i])) = __tmp; __tmp; }); | |||
377 | } | |||
378 | } | |||
379 | ||||
380 | /** | |||
381 | * radeon_vce_get_create_msg - generate a VCE create msg | |||
382 | * | |||
383 | * @rdev: radeon_device pointer | |||
384 | * @ring: ring we should submit the msg to | |||
385 | * @handle: VCE session handle to use | |||
386 | * @fence: optional fence to return | |||
387 | * | |||
388 | * Open up a stream for HW test | |||
389 | */ | |||
390 | int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, | |||
391 | uint32_t handle, struct radeon_fence **fence) | |||
392 | { | |||
393 | const unsigned ib_size_dw = 1024; | |||
394 | struct radeon_ib ib; | |||
395 | uint64_t dummy; | |||
396 | int i, r; | |||
397 | ||||
398 | r = radeon_ib_get(rdev, ring, &ib, NULL((void *)0), ib_size_dw * 4); | |||
399 | if (r) { | |||
400 | DRM_ERROR("radeon: failed to get ib (%d).\n", r)__drm_err("radeon: failed to get ib (%d).\n", r); | |||
401 | return r; | |||
402 | } | |||
403 | ||||
404 | dummy = ib.gpu_addr + 1024; | |||
405 | ||||
406 | /* stitch together an VCE create msg */ | |||
407 | ib.length_dw = 0; | |||
408 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c)((__uint32_t)(0x0000000c)); /* len */ | |||
409 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001)((__uint32_t)(0x00000001)); /* session cmd */ | |||
410 | ib.ptr[ib.length_dw++] = cpu_to_le32(handle)((__uint32_t)(handle)); | |||
411 | ||||
412 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030)((__uint32_t)(0x00000030)); /* len */ | |||
413 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001)((__uint32_t)(0x01000001)); /* create cmd */ | |||
414 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000)((__uint32_t)(0x00000000)); | |||
415 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042)((__uint32_t)(0x00000042)); | |||
416 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a)((__uint32_t)(0x0000000a)); | |||
417 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001)((__uint32_t)(0x00000001)); | |||
418 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080)((__uint32_t)(0x00000080)); | |||
419 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060)((__uint32_t)(0x00000060)); | |||
420 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100)((__uint32_t)(0x00000100)); | |||
421 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100)((__uint32_t)(0x00000100)); | |||
422 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c)((__uint32_t)(0x0000000c)); | |||
423 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000)((__uint32_t)(0x00000000)); | |||
424 | ||||
425 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014)((__uint32_t)(0x00000014)); /* len */ | |||
426 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005)((__uint32_t)(0x05000005)); /* feedback buffer */ | |||
427 | ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy))((__uint32_t)(((u32)(((dummy) >> 16) >> 16)))); | |||
428 | ib.ptr[ib.length_dw++] = cpu_to_le32(dummy)((__uint32_t)(dummy)); | |||
429 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001)((__uint32_t)(0x00000001)); | |||
430 | ||||
431 | for (i = ib.length_dw; i < ib_size_dw; ++i) | |||
432 | ib.ptr[i] = cpu_to_le32(0x0)((__uint32_t)(0x0)); | |||
433 | ||||
434 | r = radeon_ib_schedule(rdev, &ib, NULL((void *)0), false0); | |||
435 | if (r) | |||
436 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r)__drm_err("radeon: failed to schedule ib (%d).\n", r); | |||
437 | ||||
438 | ||||
439 | if (fence) | |||
440 | *fence = radeon_fence_ref(ib.fence); | |||
441 | ||||
442 | radeon_ib_free(rdev, &ib); | |||
443 | ||||
444 | return r; | |||
445 | } | |||
446 | ||||
447 | /** | |||
448 | * radeon_vce_get_destroy_msg - generate a VCE destroy msg | |||
449 | * | |||
450 | * @rdev: radeon_device pointer | |||
451 | * @ring: ring we should submit the msg to | |||
452 | * @handle: VCE session handle to use | |||
453 | * @fence: optional fence to return | |||
454 | * | |||
455 | * Close up a stream for HW test or if userspace failed to do so | |||
456 | */ | |||
457 | int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, | |||
458 | uint32_t handle, struct radeon_fence **fence) | |||
459 | { | |||
460 | const unsigned ib_size_dw = 1024; | |||
461 | struct radeon_ib ib; | |||
462 | uint64_t dummy; | |||
463 | int i, r; | |||
464 | ||||
465 | r = radeon_ib_get(rdev, ring, &ib, NULL((void *)0), ib_size_dw * 4); | |||
466 | if (r) { | |||
467 | DRM_ERROR("radeon: failed to get ib (%d).\n", r)__drm_err("radeon: failed to get ib (%d).\n", r); | |||
468 | return r; | |||
469 | } | |||
470 | ||||
471 | dummy = ib.gpu_addr + 1024; | |||
472 | ||||
473 | /* stitch together an VCE destroy msg */ | |||
474 | ib.length_dw = 0; | |||
475 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c)((__uint32_t)(0x0000000c)); /* len */ | |||
476 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001)((__uint32_t)(0x00000001)); /* session cmd */ | |||
477 | ib.ptr[ib.length_dw++] = cpu_to_le32(handle)((__uint32_t)(handle)); | |||
478 | ||||
479 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014)((__uint32_t)(0x00000014)); /* len */ | |||
480 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005)((__uint32_t)(0x05000005)); /* feedback buffer */ | |||
481 | ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy))((__uint32_t)(((u32)(((dummy) >> 16) >> 16)))); | |||
482 | ib.ptr[ib.length_dw++] = cpu_to_le32(dummy)((__uint32_t)(dummy)); | |||
483 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001)((__uint32_t)(0x00000001)); | |||
484 | ||||
485 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008)((__uint32_t)(0x00000008)); /* len */ | |||
486 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001)((__uint32_t)(0x02000001)); /* destroy cmd */ | |||
487 | ||||
488 | for (i = ib.length_dw; i < ib_size_dw; ++i) | |||
489 | ib.ptr[i] = cpu_to_le32(0x0)((__uint32_t)(0x0)); | |||
490 | ||||
491 | r = radeon_ib_schedule(rdev, &ib, NULL((void *)0), false0); | |||
492 | if (r) { | |||
493 | DRM_ERROR("radeon: failed to schedule ib (%d).\n", r)__drm_err("radeon: failed to schedule ib (%d).\n", r); | |||
494 | } | |||
495 | ||||
496 | if (fence) | |||
497 | *fence = radeon_fence_ref(ib.fence); | |||
498 | ||||
499 | radeon_ib_free(rdev, &ib); | |||
500 | ||||
501 | return r; | |||
502 | } | |||
503 | ||||
504 | /** | |||
505 | * radeon_vce_cs_reloc - command submission relocation | |||
506 | * | |||
507 | * @p: parser context | |||
508 | * @lo: address of lower dword | |||
509 | * @hi: address of higher dword | |||
510 | * @size: size of checker for relocation buffer | |||
511 | * | |||
512 | * Patch relocation inside command stream with real buffer address | |||
513 | */ | |||
514 | int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, | |||
515 | unsigned size) | |||
516 | { | |||
517 | struct radeon_cs_chunk *relocs_chunk; | |||
518 | struct radeon_bo_list *reloc; | |||
519 | uint64_t start, end, offset; | |||
520 | unsigned idx; | |||
521 | ||||
522 | relocs_chunk = p->chunk_relocs; | |||
523 | offset = radeon_get_ib_value(p, lo); | |||
524 | idx = radeon_get_ib_value(p, hi); | |||
525 | ||||
526 | if (idx >= relocs_chunk->length_dw) { | |||
527 | DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",__drm_err("Relocs at %d after relocations chunk end %d !\n", idx , relocs_chunk->length_dw) | |||
528 | idx, relocs_chunk->length_dw)__drm_err("Relocs at %d after relocations chunk end %d !\n", idx , relocs_chunk->length_dw); | |||
529 | return -EINVAL22; | |||
530 | } | |||
531 | ||||
532 | reloc = &p->relocs[(idx / 4)]; | |||
533 | start = reloc->gpu_offset; | |||
534 | end = start + radeon_bo_size(reloc->robj); | |||
535 | start += offset; | |||
536 | ||||
537 | p->ib.ptr[lo] = start & 0xFFFFFFFF; | |||
538 | p->ib.ptr[hi] = start >> 32; | |||
539 | ||||
540 | if (end <= start) { | |||
541 | DRM_ERROR("invalid reloc offset %llX!\n", offset)__drm_err("invalid reloc offset %llX!\n", offset); | |||
542 | return -EINVAL22; | |||
543 | } | |||
544 | if ((end - start) < size) { | |||
545 | DRM_ERROR("buffer to small (%d / %d)!\n",__drm_err("buffer to small (%d / %d)!\n", (unsigned)(end - start ), size) | |||
546 | (unsigned)(end - start), size)__drm_err("buffer to small (%d / %d)!\n", (unsigned)(end - start ), size); | |||
547 | return -EINVAL22; | |||
548 | } | |||
549 | ||||
550 | return 0; | |||
551 | } | |||
552 | ||||
553 | /** | |||
554 | * radeon_vce_validate_handle - validate stream handle | |||
555 | * | |||
556 | * @p: parser context | |||
557 | * @handle: handle to validate | |||
558 | * @allocated: allocated a new handle? | |||
559 | * | |||
560 | * Validates the handle and return the found session index or -EINVAL | |||
561 | * we we don't have another free session index. | |||
562 | */ | |||
563 | static int radeon_vce_validate_handle(struct radeon_cs_parser *p, | |||
564 | uint32_t handle, bool_Bool *allocated) | |||
565 | { | |||
566 | unsigned i; | |||
567 | ||||
568 | *allocated = false0; | |||
569 | ||||
570 | /* validate the handle */ | |||
571 | for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i) { | |||
572 | if (atomic_read(&p->rdev->vce.handles[i])({ typeof(*(&p->rdev->vce.handles[i])) __tmp = *(volatile typeof(*(&p->rdev->vce.handles[i])) *)&(*(& p->rdev->vce.handles[i])); membar_datadep_consumer(); __tmp ; }) == handle) { | |||
573 | if (p->rdev->vce.filp[i] != p->filp) { | |||
574 | DRM_ERROR("VCE handle collision detected!\n")__drm_err("VCE handle collision detected!\n"); | |||
575 | return -EINVAL22; | |||
576 | } | |||
577 | return i; | |||
578 | } | |||
579 | } | |||
580 | ||||
581 | /* handle not found try to alloc a new one */ | |||
582 | for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i) { | |||
583 | if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)__sync_val_compare_and_swap(&p->rdev->vce.handles[i ], 0, handle)) { | |||
584 | p->rdev->vce.filp[i] = p->filp; | |||
585 | p->rdev->vce.img_size[i] = 0; | |||
586 | *allocated = true1; | |||
587 | return i; | |||
588 | } | |||
589 | } | |||
590 | ||||
591 | DRM_ERROR("No more free VCE handles!\n")__drm_err("No more free VCE handles!\n"); | |||
592 | return -EINVAL22; | |||
593 | } | |||
594 | ||||
595 | /** | |||
596 | * radeon_vce_cs_parse - parse and validate the command stream | |||
597 | * | |||
598 | * @p: parser context | |||
599 | * | |||
600 | */ | |||
601 | int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
602 | { | |||
603 | int session_idx = -1; | |||
604 | bool_Bool destroyed = false0, created = false0, allocated = false0; | |||
605 | uint32_t tmp, handle = 0; | |||
606 | uint32_t *size = &tmp; | |||
607 | int i, r = 0; | |||
608 | ||||
609 | while (p->idx < p->chunk_ib->length_dw) { | |||
610 | uint32_t len = radeon_get_ib_value(p, p->idx); | |||
611 | uint32_t cmd = radeon_get_ib_value(p, p->idx + 1); | |||
612 | ||||
613 | if ((len < 8) || (len & 3)) { | |||
614 | DRM_ERROR("invalid VCE command length (%d)!\n", len)__drm_err("invalid VCE command length (%d)!\n", len); | |||
615 | r = -EINVAL22; | |||
616 | goto out; | |||
617 | } | |||
618 | ||||
619 | if (destroyed) { | |||
620 | DRM_ERROR("No other command allowed after destroy!\n")__drm_err("No other command allowed after destroy!\n"); | |||
621 | r = -EINVAL22; | |||
622 | goto out; | |||
623 | } | |||
624 | ||||
625 | switch (cmd) { | |||
626 | case 0x00000001: // session | |||
627 | handle = radeon_get_ib_value(p, p->idx + 2); | |||
628 | session_idx = radeon_vce_validate_handle(p, handle, | |||
629 | &allocated); | |||
630 | if (session_idx < 0) | |||
631 | return session_idx; | |||
632 | size = &p->rdev->vce.img_size[session_idx]; | |||
633 | break; | |||
634 | ||||
635 | case 0x00000002: // task info | |||
636 | break; | |||
637 | ||||
638 | case 0x01000001: // create | |||
639 | created = true1; | |||
640 | if (!allocated) { | |||
641 | DRM_ERROR("Handle already in use!\n")__drm_err("Handle already in use!\n"); | |||
642 | r = -EINVAL22; | |||
643 | goto out; | |||
644 | } | |||
645 | ||||
646 | *size = radeon_get_ib_value(p, p->idx + 8) * | |||
647 | radeon_get_ib_value(p, p->idx + 10) * | |||
648 | 8 * 3 / 2; | |||
649 | break; | |||
650 | ||||
651 | case 0x04000001: // config extension | |||
652 | case 0x04000002: // pic control | |||
653 | case 0x04000005: // rate control | |||
654 | case 0x04000007: // motion estimation | |||
655 | case 0x04000008: // rdo | |||
656 | case 0x04000009: // vui | |||
657 | break; | |||
658 | ||||
659 | case 0x03000001: // encode | |||
660 | r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, | |||
661 | *size); | |||
662 | if (r) | |||
663 | goto out; | |||
664 | ||||
665 | r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, | |||
666 | *size / 3); | |||
667 | if (r) | |||
668 | goto out; | |||
669 | break; | |||
670 | ||||
671 | case 0x02000001: // destroy | |||
672 | destroyed = true1; | |||
673 | break; | |||
674 | ||||
675 | case 0x05000001: // context buffer | |||
676 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, | |||
677 | *size * 2); | |||
678 | if (r) | |||
679 | goto out; | |||
680 | break; | |||
681 | ||||
682 | case 0x05000004: // video bitstream buffer | |||
683 | tmp = radeon_get_ib_value(p, p->idx + 4); | |||
684 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, | |||
685 | tmp); | |||
686 | if (r) | |||
687 | goto out; | |||
688 | break; | |||
689 | ||||
690 | case 0x05000005: // feedback buffer | |||
691 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, | |||
692 | 4096); | |||
693 | if (r) | |||
694 | goto out; | |||
695 | break; | |||
696 | ||||
697 | default: | |||
698 | DRM_ERROR("invalid VCE command (0x%x)!\n", cmd)__drm_err("invalid VCE command (0x%x)!\n", cmd); | |||
699 | r = -EINVAL22; | |||
700 | goto out; | |||
701 | } | |||
702 | ||||
703 | if (session_idx == -1) { | |||
704 | DRM_ERROR("no session command at start of IB\n")__drm_err("no session command at start of IB\n"); | |||
705 | r = -EINVAL22; | |||
706 | goto out; | |||
707 | } | |||
708 | ||||
709 | p->idx += len / 4; | |||
710 | } | |||
711 | ||||
712 | if (allocated && !created) { | |||
713 | DRM_ERROR("New session without create command!\n")__drm_err("New session without create command!\n"); | |||
714 | r = -ENOENT2; | |||
715 | } | |||
716 | ||||
717 | out: | |||
718 | if ((!r && destroyed) || (r && allocated)) { | |||
719 | /* | |||
720 | * IB contains a destroy msg or we have allocated an | |||
721 | * handle and got an error, anyway free the handle | |||
722 | */ | |||
723 | for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i) | |||
724 | atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0)__sync_val_compare_and_swap(&p->rdev->vce.handles[i ], handle, 0); | |||
725 | } | |||
726 | ||||
727 | return r; | |||
728 | } | |||
729 | ||||
730 | /** | |||
731 | * radeon_vce_semaphore_emit - emit a semaphore command | |||
732 | * | |||
733 | * @rdev: radeon_device pointer | |||
734 | * @ring: engine to use | |||
735 | * @semaphore: address of semaphore | |||
736 | * @emit_wait: true=emit wait, false=emit signal | |||
737 | * | |||
738 | */ | |||
739 | bool_Bool radeon_vce_semaphore_emit(struct radeon_device *rdev, | |||
740 | struct radeon_ring *ring, | |||
741 | struct radeon_semaphore *semaphore, | |||
742 | bool_Bool emit_wait) | |||
743 | { | |||
744 | uint64_t addr = semaphore->gpu_addr; | |||
745 | ||||
746 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE)((__uint32_t)(0x00000006))); | |||
747 | radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF)((__uint32_t)((addr >> 3) & 0x000FFFFF))); | |||
748 | radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF)((__uint32_t)((addr >> 23) & 0x000FFFFF))); | |||
749 | radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0))((__uint32_t)(0x01003000 | (emit_wait ? 1 : 0)))); | |||
750 | if (!emit_wait) | |||
751 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)((__uint32_t)(0x00000001))); | |||
752 | ||||
753 | return true1; | |||
754 | } | |||
755 | ||||
756 | /** | |||
757 | * radeon_vce_ib_execute - execute indirect buffer | |||
758 | * | |||
759 | * @rdev: radeon_device pointer | |||
760 | * @ib: the IB to execute | |||
761 | * | |||
762 | */ | |||
763 | void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
764 | { | |||
765 | struct radeon_ring *ring = &rdev->ring[ib->ring]; | |||
766 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB)((__uint32_t)(0x00000002))); | |||
767 | radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr)((__uint32_t)(ib->gpu_addr))); | |||
768 | radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr))((__uint32_t)(((u32)(((ib->gpu_addr) >> 16) >> 16))))); | |||
769 | radeon_ring_write(ring, cpu_to_le32(ib->length_dw)((__uint32_t)(ib->length_dw))); | |||
770 | } | |||
771 | ||||
772 | /** | |||
773 | * radeon_vce_fence_emit - add a fence command to the ring | |||
774 | * | |||
775 | * @rdev: radeon_device pointer | |||
776 | * @fence: the fence | |||
777 | * | |||
778 | */ | |||
779 | void radeon_vce_fence_emit(struct radeon_device *rdev, | |||
780 | struct radeon_fence *fence) | |||
781 | { | |||
782 | struct radeon_ring *ring = &rdev->ring[fence->ring]; | |||
783 | uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; | |||
784 | ||||
785 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE)((__uint32_t)(0x00000003))); | |||
786 | radeon_ring_write(ring, cpu_to_le32(addr)((__uint32_t)(addr))); | |||
787 | radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr))((__uint32_t)(((u32)(((addr) >> 16) >> 16))))); | |||
788 | radeon_ring_write(ring, cpu_to_le32(fence->seq)((__uint32_t)(fence->seq))); | |||
789 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP)((__uint32_t)(0x00000004))); | |||
790 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)((__uint32_t)(0x00000001))); | |||
791 | } | |||
792 | ||||
793 | /** | |||
794 | * radeon_vce_ring_test - test if VCE ring is working | |||
795 | * | |||
796 | * @rdev: radeon_device pointer | |||
797 | * @ring: the engine to test on | |||
798 | * | |||
799 | */ | |||
800 | int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
801 | { | |||
802 | uint32_t rptr = vce_v1_0_get_rptr(rdev, ring); | |||
803 | unsigned i; | |||
804 | int r; | |||
805 | ||||
806 | r = radeon_ring_lock(rdev, ring, 16); | |||
807 | if (r) { | |||
808 | DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",__drm_err("radeon: vce failed to lock ring %d (%d).\n", ring-> idx, r) | |||
809 | ring->idx, r)__drm_err("radeon: vce failed to lock ring %d (%d).\n", ring-> idx, r); | |||
810 | return r; | |||
811 | } | |||
812 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)((__uint32_t)(0x00000001))); | |||
813 | radeon_ring_unlock_commit(rdev, ring, false0); | |||
814 | ||||
815 | for (i = 0; i < rdev->usec_timeout; i++) { | |||
816 | if (vce_v1_0_get_rptr(rdev, ring) != rptr) | |||
817 | break; | |||
818 | udelay(1); | |||
819 | } | |||
820 | ||||
821 | if (i < rdev->usec_timeout) { | |||
822 | DRM_INFO("ring test on %d succeeded in %d usecs\n",printk("\0016" "[" "drm" "] " "ring test on %d succeeded in %d usecs\n" , ring->idx, i) | |||
823 | ring->idx, i)printk("\0016" "[" "drm" "] " "ring test on %d succeeded in %d usecs\n" , ring->idx, i); | |||
824 | } else { | |||
825 | DRM_ERROR("radeon: ring %d test failed\n",__drm_err("radeon: ring %d test failed\n", ring->idx) | |||
826 | ring->idx)__drm_err("radeon: ring %d test failed\n", ring->idx); | |||
827 | r = -ETIMEDOUT60; | |||
828 | } | |||
829 | ||||
830 | return r; | |||
831 | } | |||
832 | ||||
833 | /** | |||
834 | * radeon_vce_ib_test - test if VCE IBs are working | |||
835 | * | |||
836 | * @rdev: radeon_device pointer | |||
837 | * @ring: the engine to test on | |||
838 | * | |||
839 | */ | |||
840 | int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
841 | { | |||
842 | struct radeon_fence *fence = NULL((void *)0); | |||
843 | int r; | |||
844 | ||||
845 | r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL((void *)0)); | |||
846 | if (r) { | |||
847 | DRM_ERROR("radeon: failed to get create msg (%d).\n", r)__drm_err("radeon: failed to get create msg (%d).\n", r); | |||
848 | goto error; | |||
849 | } | |||
850 | ||||
851 | r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence); | |||
852 | if (r) { | |||
853 | DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r)__drm_err("radeon: failed to get destroy ib (%d).\n", r); | |||
854 | goto error; | |||
855 | } | |||
856 | ||||
857 | r = radeon_fence_wait_timeout(fence, false0, usecs_to_jiffies((((uint64_t)(1000000)) * hz / 1000000) | |||
858 | RADEON_USEC_IB_TEST_TIMEOUT)(((uint64_t)(1000000)) * hz / 1000000)); | |||
859 | if (r < 0) { | |||
860 | DRM_ERROR("radeon: fence wait failed (%d).\n", r)__drm_err("radeon: fence wait failed (%d).\n", r); | |||
861 | } else if (r == 0) { | |||
862 | DRM_ERROR("radeon: fence wait timed out.\n")__drm_err("radeon: fence wait timed out.\n"); | |||
863 | r = -ETIMEDOUT60; | |||
864 | } else { | |||
865 | DRM_INFO("ib test on ring %d succeeded\n", ring->idx)printk("\0016" "[" "drm" "] " "ib test on ring %d succeeded\n" , ring->idx); | |||
866 | r = 0; | |||
867 | } | |||
868 | error: | |||
869 | radeon_fence_unref(&fence); | |||
870 | return r; | |||
871 | } |
1 | /* $OpenBSD: workqueue.h,v 1.6 2021/08/14 03:12:51 jsg Exp $ */ |
2 | /* |
3 | * Copyright (c) 2015 Mark Kettenis |
4 | * |
5 | * Permission to use, copy, modify, and distribute this software for any |
6 | * purpose with or without fee is hereby granted, provided that the above |
7 | * copyright notice and this permission notice appear in all copies. |
8 | * |
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
16 | */ |
17 | |
18 | #ifndef _LINUX_WORKQUEUE_H |
19 | #define _LINUX_WORKQUEUE_H |
20 | |
21 | #include <sys/param.h> |
22 | #include <sys/systm.h> |
23 | #include <sys/task.h> |
24 | #include <sys/timeout.h> |
25 | #include <linux/bitops.h> |
26 | #include <linux/atomic.h> |
27 | #include <linux/rcupdate.h> |
28 | #include <linux/kernel.h> |
29 | #include <linux/lockdep.h> |
30 | #include <linux/timer.h> |
31 | |
struct workqueue_struct;

/* shared queues created by the drm glue code */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_long_wq;

/* allocation flags; accepted but ignored by alloc_workqueue() below */
#define WQ_HIGHPRI	1
#define WQ_FREEZABLE	2
#define WQ_UNBOUND	4

#define WQ_UNBOUND_MAX_ACTIVE	4	/* matches nthreads in drm_linux.c */
44 | |
45 | static inline struct workqueue_struct * |
46 | alloc_workqueue(const char *name, int flags, int max_active) |
47 | { |
48 | struct taskq *tq = taskq_create(name, 1, IPL_TTY0x9, 0); |
49 | return (struct workqueue_struct *)tq; |
50 | } |
51 | |
52 | static inline struct workqueue_struct * |
53 | alloc_ordered_workqueue(const char *name, int flags) |
54 | { |
55 | struct taskq *tq = taskq_create(name, 1, IPL_TTY0x9, 0); |
56 | return (struct workqueue_struct *)tq; |
57 | } |
58 | |
59 | static inline struct workqueue_struct * |
60 | create_singlethread_workqueue(const char *name) |
61 | { |
62 | struct taskq *tq = taskq_create(name, 1, IPL_TTY0x9, 0); |
63 | return (struct workqueue_struct *)tq; |
64 | } |
65 | |
static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
	/* tear down the backing taskq created in alloc_workqueue() */
	taskq_destroy((struct taskq *)wq);
}
71 | |
struct work_struct {
	struct task task;	/* underlying taskq(9) task */
	struct taskq *tq;	/* taskq this work was last queued on; NULL until queued */
};

typedef void (*work_func_t)(struct work_struct *);
78 | |
79 | static inline void |
80 | INIT_WORK(struct work_struct *work, work_func_t func) |
81 | { |
82 | work->tq = NULL((void *)0); |
83 | task_set(&work->task, (void (*)(void *))func, work); |
84 | } |
85 | |
86 | #define INIT_WORK_ONSTACK(x, y)INIT_WORK((x), (y)) INIT_WORK((x), (y)) |
87 | |
88 | static inline bool_Bool |
89 | queue_work(struct workqueue_struct *wq, struct work_struct *work) |
90 | { |
91 | work->tq = (struct taskq *)wq; |
92 | return task_add(work->tq, &work->task); |
93 | } |
94 | |
95 | static inline void |
96 | cancel_work_sync(struct work_struct *work) |
97 | { |
98 | if (work->tq != NULL((void *)0)) |
99 | task_del(work->tq, &work->task); |
100 | } |
101 | |
102 | #define work_pending(work)((&(work)->task)->t_flags & 1) task_pending(&(work)->task)((&(work)->task)->t_flags & 1) |
103 | |
104 | struct delayed_work { |
105 | struct work_struct work; |
106 | struct timeout to; |
107 | struct taskq *tq; |
108 | }; |
109 | |
110 | #define system_power_efficient_wq((struct workqueue_struct *)systq) ((struct workqueue_struct *)systq) |
111 | |
112 | static inline struct delayed_work * |
113 | to_delayed_work(struct work_struct *work) |
114 | { |
115 | return container_of(work, struct delayed_work, work)({ const __typeof( ((struct delayed_work *)0)->work ) *__mptr = (work); (struct delayed_work *)( (char *)__mptr - __builtin_offsetof (struct delayed_work, work) );}); |
116 | } |
117 | |
/*
 * Timeout callback for delayed work: when the timer fires, move the
 * work onto the taskq recorded in dwork->tq.
 *
 * NOTE(review): the registered timeout arg is expected to alias the
 * start of struct delayed_work; registering &dwork->work only works
 * while `work` is the first member — confirm before reordering members.
 */
static void
__delayed_work_tick(void *arg)
{
	struct delayed_work *dwork = arg;

	task_add(dwork->tq, &dwork->work.task);
}
125 | |
126 | static inline void |
127 | INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func) |
128 | { |
129 | INIT_WORK(&dwork->work, func); |
130 | timeout_set(&dwork->to, __delayed_work_tick, &dwork->work); |
131 | } |
132 | |
133 | static inline void |
134 | INIT_DELAYED_WORK_ONSTACK(struct delayed_work *dwork, work_func_t func) |
135 | { |
136 | INIT_WORK(&dwork->work, func); |
137 | timeout_set(&dwork->to, __delayed_work_tick, &dwork->work); |
138 | } |
139 | |
140 | static inline bool_Bool |
141 | schedule_work(struct work_struct *work) |
142 | { |
143 | work->tq = (struct taskq *)system_wq; |
144 | return task_add(work->tq, &work->task); |
145 | } |
146 | |
147 | static inline bool_Bool |
148 | schedule_delayed_work(struct delayed_work *dwork, int jiffies) |
149 | { |
150 | dwork->tq = (struct taskq *)system_wq; |
151 | return timeout_add(&dwork->to, jiffies); |
152 | } |
153 | |
154 | static inline bool_Bool |
155 | queue_delayed_work(struct workqueue_struct *wq, |
156 | struct delayed_work *dwork, int jiffies) |
157 | { |
158 | dwork->tq = (struct taskq *)wq; |
159 | return timeout_add(&dwork->to, jiffies); |
160 | } |
161 | |
162 | static inline bool_Bool |
163 | mod_delayed_work(struct workqueue_struct *wq, |
164 | struct delayed_work *dwork, int jiffies) |
165 | { |
166 | dwork->tq = (struct taskq *)wq; |
167 | return (timeout_add(&dwork->to, jiffies) == 0); |
168 | } |
169 | |
170 | static inline bool_Bool |
171 | cancel_delayed_work(struct delayed_work *dwork) |
172 | { |
173 | if (dwork->tq == NULL((void *)0)) |
174 | return false0; |
175 | if (timeout_del(&dwork->to)) |
176 | return true1; |
177 | return task_del(dwork->tq, &dwork->work.task); |
178 | } |
179 | |
180 | static inline bool_Bool |
181 | cancel_delayed_work_sync(struct delayed_work *dwork) |
182 | { |
183 | if (dwork->tq == NULL((void *)0)) |
184 | return false0; |
185 | if (timeout_del(&dwork->to)) |
186 | return true1; |
187 | return task_del(dwork->tq, &dwork->work.task); |
188 | } |
189 | |
190 | static inline bool_Bool |
191 | delayed_work_pending(struct delayed_work *dwork) |
192 | { |
193 | if (timeout_pending(&dwork->to)((&dwork->to)->to_flags & 0x02)) |
194 | return true1; |
195 | return task_pending(&dwork->work.task)((&dwork->work.task)->t_flags & 1); |
196 | } |
197 | |
198 | void flush_workqueue(struct workqueue_struct *); |
199 | bool_Bool flush_work(struct work_struct *); |
200 | bool_Bool flush_delayed_work(struct delayed_work *); |
201 | #define flush_scheduled_work()flush_workqueue(system_wq) flush_workqueue(system_wq) |
202 | #define drain_workqueue(x)flush_workqueue(x) flush_workqueue(x) |
203 | |
/* Best effort: drop the task if it is still queued before the stack
 * frame holding it goes away; does not wait for a running work. */
static inline void
destroy_work_on_stack(struct work_struct *work)
{
	if (work->tq)
		task_del(work->tq, &work->task);
}

/* on-stack delayed_work teardown is a no-op in this port */
#define destroy_delayed_work_on_stack(x)
212 | |
/*
 * NOTE(review): the rcu_head is carried for API compatibility only;
 * queue_rcu_work() below queues immediately without an RCU grace period.
 */
struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;
};
217 | |
/* Initialize an rcu_work; the embedded rcu_head is left untouched. */
static inline void
INIT_RCU_WORK(struct rcu_work *work, work_func_t func)
{
	INIT_WORK(&work->work, func);
}
223 | |
224 | static inline bool_Bool |
225 | queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *work) |
226 | { |
227 | return queue_work(wq, &work->work); |
228 | } |
229 | |
230 | #endif |
1 | /* Public domain. */ |
2 | |
3 | #ifndef _LINUX_FIRMWARE_H |
4 | #define _LINUX_FIRMWARE_H |
5 | |
6 | #include <sys/types.h> |
7 | #include <sys/malloc.h> |
8 | #include <sys/device.h> |
9 | #include <linux/types.h> |
10 | #include <linux/gfp.h> |
11 | |
12 | #ifndef __DECONST |
13 | #define __DECONST(type, var)((type)(__uintptr_t)(const void *)(var)) ((type)(__uintptr_t)(const void *)(var)) |
14 | #endif |
15 | |
16 | struct firmware { |
17 | size_t size; |
18 | const u8 *data; |
19 | }; |
20 | |
21 | static inline int |
22 | request_firmware(const struct firmware **fw, const char *name, |
23 | struct device *device) |
24 | { |
25 | int r; |
26 | struct firmware *f = malloc(sizeof(struct firmware), M_DRM145, |
27 | M_WAITOK0x0001 | M_ZERO0x0008); |
28 | r = loadfirmware(name, __DECONST(u_char **, &f->data)((u_char **)(__uintptr_t)(const void *)(&f->data)), &f->size); |
29 | if (r != 0) { |
30 | free(f, M_DRM145, sizeof(struct firmware)); |
31 | *fw = NULL((void *)0); |
32 | return -r; |
33 | } else { |
34 | *fw = f; |
35 | return 0; |
36 | } |
37 | } |
38 | |
static inline int
request_firmware_direct(const struct firmware **fw, const char *name,
    struct device *device)
{
	/* no usermode helper on OpenBSD; identical to request_firmware() */
	return request_firmware(fw, name, device);
}

#define request_firmware_nowait(a, b, c, d, e, f, g)	-EINVAL
47 | |
48 | static inline void |
49 | release_firmware(const struct firmware *fw) |
50 | { |
51 | if (fw) |
52 | free(__DECONST(u_char *, fw->data)((u_char *)(__uintptr_t)(const void *)(fw->data)), M_DEVBUF2, fw->size); |
53 | free(__DECONST(struct firmware *, fw)((struct firmware *)(__uintptr_t)(const void *)(fw)), M_DRM145, sizeof(*fw)); |
54 | } |
55 | |
56 | #endif |