Bug Summary

File: dev/pci/drm/radeon/radeon_vce.c
Warning: line 97, column 9
Access to field 'size' results in a dereference of a null pointer (loaded from field 'vce_fw')

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name radeon_vce.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/radeon/radeon_vce.c

/usr/src/sys/dev/pci/drm/radeon/radeon_vce.c

1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 * Authors: Christian König <christian.koenig@amd.com>
26 */
27
28#include <linux/firmware.h>
29#include <linux/module.h>
30
31#include <drm/drm.h>
32
33#include "radeon.h"
34#include "radeon_asic.h"
35#include "sid.h"
36
37/* 1 second timeout */
38#define VCE_IDLE_TIMEOUT_MS1000 1000
39
40/* Firmware Names */
41#define FIRMWARE_TAHITI"radeon/TAHITI_vce.bin" "radeon/TAHITI_vce.bin"
42#define FIRMWARE_BONAIRE"radeon/BONAIRE_vce.bin" "radeon/BONAIRE_vce.bin"
43
44MODULE_FIRMWARE(FIRMWARE_TAHITI);
45MODULE_FIRMWARE(FIRMWARE_BONAIRE);
46
47static void radeon_vce_idle_work_handler(struct work_struct *work);
48
49/**
50 * radeon_vce_init - allocate memory, load vce firmware
51 *
52 * @rdev: radeon_device pointer
53 *
54 * First step to get VCE online, allocate memory and load the firmware
55 */
56int radeon_vce_init(struct radeon_device *rdev)
57{
58 static const char *fw_version = "[ATI LIB=VCEFW,";
59 static const char *fb_version = "[ATI LIB=VCEFWSTATS,";
60 unsigned long size;
61 const char *fw_name, *c;
62 uint8_t start, mid, end;
63 int i, r;
64
65 INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler);
1
Calling 'INIT_DELAYED_WORK'
3
Returning from 'INIT_DELAYED_WORK'
66
67 switch (rdev->family) {
4
Control jumps to 'case CHIP_MULLINS:' at line 80
68 case CHIP_TAHITI:
69 case CHIP_PITCAIRN:
70 case CHIP_VERDE:
71 case CHIP_OLAND:
72 case CHIP_ARUBA:
73 fw_name = FIRMWARE_TAHITI"radeon/TAHITI_vce.bin";
74 break;
75
76 case CHIP_BONAIRE:
77 case CHIP_KAVERI:
78 case CHIP_KABINI:
79 case CHIP_HAWAII:
80 case CHIP_MULLINS:
81 fw_name = FIRMWARE_BONAIRE"radeon/BONAIRE_vce.bin";
82 break;
5
Execution continues on line 88
83
84 default:
85 return -EINVAL22;
86 }
87
88 r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
6
Calling 'request_firmware'
10
Returning from 'request_firmware'
89 if (r) {
11
Assuming 'r' is 0
12
Taking false branch
90 dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",printf("drm:pid%d:%s *ERROR* " "radeon_vce: Can't load firmware \"%s\"\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , fw_name
)
91 fw_name)printf("drm:pid%d:%s *ERROR* " "radeon_vce: Can't load firmware \"%s\"\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , fw_name
)
;
92 return r;
93 }
94
95 /* search for firmware version */
96
97 size = rdev->vce_fw->size - strlen(fw_version) - 9;
13
Access to field 'size' results in a dereference of a null pointer (loaded from field 'vce_fw')
98 c = rdev->vce_fw->data;
99 for (;size > 0; --size, ++c)
100 if (strncmp(c, fw_version, strlen(fw_version)) == 0)
101 break;
102
103 if (size == 0)
104 return -EINVAL22;
105
106 c += strlen(fw_version);
107#ifdef notyet
108 if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3)
109 return -EINVAL22;
110#else
111#if 0
112 int x;
113 printf("\n%s A\n", __func__);
114 for (x = 0; x < 16; x++) {
115 printf("%02x ", c[x]);
116 }
117#endif
118 if (c[2] != '.') {
119 printf("%s %s bad start value\n", rdev->self.dv_xname, __func__);
120 return -EINVAL22;
121 }
122 start = (10 * (c[0] - '0')) + (c[1] - '0');
123 c += 3;
124
125 if (c[1] != '.') {
126 printf("%s %s bad mid value\n", rdev->self.dv_xname, __func__);
127 return -EINVAL22;
128 }
129 mid = c[0] - '0';
130 c += 2;
131
132 if (c[1] != ']') {
133 printf("%s %s bad end value\n", rdev->self.dv_xname, __func__);
134 return -EINVAL22;
135 }
136 end = c[0] - '0';
137#endif
138
139 /* search for feedback version */
140
141 size = rdev->vce_fw->size - strlen(fb_version) - 3;
142 c = rdev->vce_fw->data;
143 for (;size > 0; --size, ++c)
144 if (strncmp(c, fb_version, strlen(fb_version)) == 0)
145 break;
146
147 if (size == 0)
148 return -EINVAL22;
149
150 c += strlen(fb_version);
151#ifdef notyet
152 if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
153 return -EINVAL22;
154#else
155#if 0
156 printf("\n%s B\n", __func__);
157 for (x = 0; x < 16; x++) {
158 printf("%02x ", c[x]);
159 }
160 printf("\n");
161#endif
162 if (c[2] != ']') {
163 printf("%s %s bad fb_version value\n", rdev->self.dv_xname, __func__);
164 return -EINVAL22;
165 }
166 rdev->vce.fb_version = (10 * (c[0] - '0')) + (c[1] - '0');
167#endif
168
169 DRM_INFO("Found VCE firmware/feedback version %d.%d.%d / %d!\n",printk("\0016" "[" "drm" "] " "Found VCE firmware/feedback version %d.%d.%d / %d!\n"
, start, mid, end, rdev->vce.fb_version)
170 start, mid, end, rdev->vce.fb_version)printk("\0016" "[" "drm" "] " "Found VCE firmware/feedback version %d.%d.%d / %d!\n"
, start, mid, end, rdev->vce.fb_version)
;
171
172 rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
173
174 /* we can only work with this fw version for now */
175 if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) &&
176 (rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) &&
177 (rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8))))
178 return -EINVAL22;
179
180 /* allocate firmware, stack and heap BO */
181
182 if (rdev->family < CHIP_BONAIRE)
183 size = vce_v1_0_bo_size(rdev);
184 else
185 size = vce_v2_0_bo_size(rdev);
186 r = radeon_bo_create(rdev, size, PAGE_SIZE(1 << 12), true1,
187 RADEON_GEM_DOMAIN_VRAM0x4, 0, NULL((void *)0), NULL((void *)0),
188 &rdev->vce.vcpu_bo);
189 if (r) {
190 dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r)printf("drm:pid%d:%s *ERROR* " "(%d) failed to allocate VCE bo\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
191 return r;
192 }
193
194 r = radeon_bo_reserve(rdev->vce.vcpu_bo, false0);
195 if (r) {
196 radeon_bo_unref(&rdev->vce.vcpu_bo);
197 dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r)printf("drm:pid%d:%s *ERROR* " "(%d) failed to reserve VCE bo\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
198 return r;
199 }
200
201 r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM0x4,
202 &rdev->vce.gpu_addr);
203 radeon_bo_unreserve(rdev->vce.vcpu_bo);
204 if (r) {
205 radeon_bo_unref(&rdev->vce.vcpu_bo);
206 dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r)printf("drm:pid%d:%s *ERROR* " "(%d) VCE bo pin failed\n", ({
struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
207 return r;
208 }
209
210 for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i) {
211 atomic_set(&rdev->vce.handles[i], 0)({ typeof(*(&rdev->vce.handles[i])) __tmp = ((0)); *(volatile
typeof(*(&rdev->vce.handles[i])) *)&(*(&rdev->
vce.handles[i])) = __tmp; __tmp; })
;
212 rdev->vce.filp[i] = NULL((void *)0);
213 }
214
215 return 0;
216}
217
218/**
219 * radeon_vce_fini - free memory
220 *
221 * @rdev: radeon_device pointer
222 *
223 * Last step on VCE teardown, free firmware memory
224 */
225void radeon_vce_fini(struct radeon_device *rdev)
226{
227 if (rdev->vce.vcpu_bo == NULL((void *)0))
228 return;
229
230 radeon_bo_unref(&rdev->vce.vcpu_bo);
231
232 release_firmware(rdev->vce_fw);
233}
234
235/**
236 * radeon_vce_suspend - unpin VCE fw memory
237 *
238 * @rdev: radeon_device pointer
239 *
240 */
241int radeon_vce_suspend(struct radeon_device *rdev)
242{
243 int i;
244
245 if (rdev->vce.vcpu_bo == NULL((void *)0))
246 return 0;
247
248 for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i)
249 if (atomic_read(&rdev->vce.handles[i])({ typeof(*(&rdev->vce.handles[i])) __tmp = *(volatile
typeof(*(&rdev->vce.handles[i])) *)&(*(&rdev->
vce.handles[i])); membar_datadep_consumer(); __tmp; })
)
250 break;
251
252 if (i == RADEON_MAX_VCE_HANDLES16)
253 return 0;
254
255 /* TODO: suspending running encoding sessions isn't supported */
256 return -EINVAL22;
257}
258
259/**
260 * radeon_vce_resume - pin VCE fw memory
261 *
262 * @rdev: radeon_device pointer
263 *
264 */
265int radeon_vce_resume(struct radeon_device *rdev)
266{
267 void *cpu_addr;
268 int r;
269
270 if (rdev->vce.vcpu_bo == NULL((void *)0))
271 return -EINVAL22;
272
273 r = radeon_bo_reserve(rdev->vce.vcpu_bo, false0);
274 if (r) {
275 dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r)printf("drm:pid%d:%s *ERROR* " "(%d) failed to reserve VCE bo\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
276 return r;
277 }
278
279 r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
280 if (r) {
281 radeon_bo_unreserve(rdev->vce.vcpu_bo);
282 dev_err(rdev->dev, "(%d) VCE map failed\n", r)printf("drm:pid%d:%s *ERROR* " "(%d) VCE map failed\n", ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})->ci_curproc->p_p->ps_pid, __func__ , r)
;
283 return r;
284 }
285
286 memset(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo))__builtin_memset((cpu_addr), (0), (radeon_bo_size(rdev->vce
.vcpu_bo)))
;
287 if (rdev->family < CHIP_BONAIRE)
288 r = vce_v1_0_load_fw(rdev, cpu_addr);
289 else
290 memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size)__builtin_memcpy((cpu_addr), (rdev->vce_fw->data), (rdev
->vce_fw->size))
;
291
292 radeon_bo_kunmap(rdev->vce.vcpu_bo);
293
294 radeon_bo_unreserve(rdev->vce.vcpu_bo);
295
296 return r;
297}
298
299/**
300 * radeon_vce_idle_work_handler - power off VCE
301 *
302 * @work: pointer to work structure
303 *
304 * power of VCE when it's not used any more
305 */
306static void radeon_vce_idle_work_handler(struct work_struct *work)
307{
308 struct radeon_device *rdev =
309 container_of(work, struct radeon_device, vce.idle_work.work)({ const __typeof( ((struct radeon_device *)0)->vce.idle_work
.work ) *__mptr = (work); (struct radeon_device *)( (char *)__mptr
- __builtin_offsetof(struct radeon_device, vce.idle_work.work
) );})
;
310
311 if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX6) == 0) &&
312 (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX7) == 0)) {
313 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
314 radeon_dpm_enable_vce(rdev, false0);
315 } else {
316 radeon_set_vce_clocks(rdev, 0, 0)(rdev)->asic->pm.set_vce_clocks((rdev), (0), (0));
317 }
318 } else {
319 schedule_delayed_work(&rdev->vce.idle_work,
320 msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)(((uint64_t)(1000)) * hz / 1000));
321 }
322}
323
324/**
325 * radeon_vce_note_usage - power up VCE
326 *
327 * @rdev: radeon_device pointer
328 *
329 * Make sure VCE is powerd up when we want to use it
330 */
331void radeon_vce_note_usage(struct radeon_device *rdev)
332{
333 bool_Bool streams_changed = false0;
334 bool_Bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);
335 set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
336 msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS)(((uint64_t)(1000)) * hz / 1000));
337
338 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
339 /* XXX figure out if the streams changed */
340 streams_changed = false0;
341 }
342
343 if (set_clocks || streams_changed) {
344 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
345 radeon_dpm_enable_vce(rdev, true1);
346 } else {
347 radeon_set_vce_clocks(rdev, 53300, 40000)(rdev)->asic->pm.set_vce_clocks((rdev), (53300), (40000
))
;
348 }
349 }
350}
351
352/**
353 * radeon_vce_free_handles - free still open VCE handles
354 *
355 * @rdev: radeon_device pointer
356 * @filp: drm file pointer
357 *
358 * Close all VCE handles still open by this file pointer
359 */
360void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
361{
362 int i, r;
363 for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i) {
364 uint32_t handle = atomic_read(&rdev->vce.handles[i])({ typeof(*(&rdev->vce.handles[i])) __tmp = *(volatile
typeof(*(&rdev->vce.handles[i])) *)&(*(&rdev->
vce.handles[i])); membar_datadep_consumer(); __tmp; })
;
365 if (!handle || rdev->vce.filp[i] != filp)
366 continue;
367
368 radeon_vce_note_usage(rdev);
369
370 r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX6,
371 handle, NULL((void *)0));
372 if (r)
373 DRM_ERROR("Error destroying VCE handle (%d)!\n", r)__drm_err("Error destroying VCE handle (%d)!\n", r);
374
375 rdev->vce.filp[i] = NULL((void *)0);
376 atomic_set(&rdev->vce.handles[i], 0)({ typeof(*(&rdev->vce.handles[i])) __tmp = ((0)); *(volatile
typeof(*(&rdev->vce.handles[i])) *)&(*(&rdev->
vce.handles[i])) = __tmp; __tmp; })
;
377 }
378}
379
380/**
381 * radeon_vce_get_create_msg - generate a VCE create msg
382 *
383 * @rdev: radeon_device pointer
384 * @ring: ring we should submit the msg to
385 * @handle: VCE session handle to use
386 * @fence: optional fence to return
387 *
388 * Open up a stream for HW test
389 */
390int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
391 uint32_t handle, struct radeon_fence **fence)
392{
393 const unsigned ib_size_dw = 1024;
394 struct radeon_ib ib;
395 uint64_t dummy;
396 int i, r;
397
398 r = radeon_ib_get(rdev, ring, &ib, NULL((void *)0), ib_size_dw * 4);
399 if (r) {
400 DRM_ERROR("radeon: failed to get ib (%d).\n", r)__drm_err("radeon: failed to get ib (%d).\n", r);
401 return r;
402 }
403
404 dummy = ib.gpu_addr + 1024;
405
406 /* stitch together an VCE create msg */
407 ib.length_dw = 0;
408 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c)((__uint32_t)(0x0000000c)); /* len */
409 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001)((__uint32_t)(0x00000001)); /* session cmd */
410 ib.ptr[ib.length_dw++] = cpu_to_le32(handle)((__uint32_t)(handle));
411
412 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030)((__uint32_t)(0x00000030)); /* len */
413 ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001)((__uint32_t)(0x01000001)); /* create cmd */
414 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000)((__uint32_t)(0x00000000));
415 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042)((__uint32_t)(0x00000042));
416 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a)((__uint32_t)(0x0000000a));
417 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001)((__uint32_t)(0x00000001));
418 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080)((__uint32_t)(0x00000080));
419 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060)((__uint32_t)(0x00000060));
420 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100)((__uint32_t)(0x00000100));
421 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100)((__uint32_t)(0x00000100));
422 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c)((__uint32_t)(0x0000000c));
423 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000)((__uint32_t)(0x00000000));
424
425 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014)((__uint32_t)(0x00000014)); /* len */
426 ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005)((__uint32_t)(0x05000005)); /* feedback buffer */
427 ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy))((__uint32_t)(((u32)(((dummy) >> 16) >> 16))));
428 ib.ptr[ib.length_dw++] = cpu_to_le32(dummy)((__uint32_t)(dummy));
429 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001)((__uint32_t)(0x00000001));
430
431 for (i = ib.length_dw; i < ib_size_dw; ++i)
432 ib.ptr[i] = cpu_to_le32(0x0)((__uint32_t)(0x0));
433
434 r = radeon_ib_schedule(rdev, &ib, NULL((void *)0), false0);
435 if (r)
436 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r)__drm_err("radeon: failed to schedule ib (%d).\n", r);
437
438
439 if (fence)
440 *fence = radeon_fence_ref(ib.fence);
441
442 radeon_ib_free(rdev, &ib);
443
444 return r;
445}
446
447/**
448 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
449 *
450 * @rdev: radeon_device pointer
451 * @ring: ring we should submit the msg to
452 * @handle: VCE session handle to use
453 * @fence: optional fence to return
454 *
455 * Close up a stream for HW test or if userspace failed to do so
456 */
457int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
458 uint32_t handle, struct radeon_fence **fence)
459{
460 const unsigned ib_size_dw = 1024;
461 struct radeon_ib ib;
462 uint64_t dummy;
463 int i, r;
464
465 r = radeon_ib_get(rdev, ring, &ib, NULL((void *)0), ib_size_dw * 4);
466 if (r) {
467 DRM_ERROR("radeon: failed to get ib (%d).\n", r)__drm_err("radeon: failed to get ib (%d).\n", r);
468 return r;
469 }
470
471 dummy = ib.gpu_addr + 1024;
472
473 /* stitch together an VCE destroy msg */
474 ib.length_dw = 0;
475 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c)((__uint32_t)(0x0000000c)); /* len */
476 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001)((__uint32_t)(0x00000001)); /* session cmd */
477 ib.ptr[ib.length_dw++] = cpu_to_le32(handle)((__uint32_t)(handle));
478
479 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014)((__uint32_t)(0x00000014)); /* len */
480 ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005)((__uint32_t)(0x05000005)); /* feedback buffer */
481 ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy))((__uint32_t)(((u32)(((dummy) >> 16) >> 16))));
482 ib.ptr[ib.length_dw++] = cpu_to_le32(dummy)((__uint32_t)(dummy));
483 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001)((__uint32_t)(0x00000001));
484
485 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008)((__uint32_t)(0x00000008)); /* len */
486 ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001)((__uint32_t)(0x02000001)); /* destroy cmd */
487
488 for (i = ib.length_dw; i < ib_size_dw; ++i)
489 ib.ptr[i] = cpu_to_le32(0x0)((__uint32_t)(0x0));
490
491 r = radeon_ib_schedule(rdev, &ib, NULL((void *)0), false0);
492 if (r) {
493 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r)__drm_err("radeon: failed to schedule ib (%d).\n", r);
494 }
495
496 if (fence)
497 *fence = radeon_fence_ref(ib.fence);
498
499 radeon_ib_free(rdev, &ib);
500
501 return r;
502}
503
504/**
505 * radeon_vce_cs_reloc - command submission relocation
506 *
507 * @p: parser context
508 * @lo: address of lower dword
509 * @hi: address of higher dword
510 * @size: size of checker for relocation buffer
511 *
512 * Patch relocation inside command stream with real buffer address
513 */
514int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
515 unsigned size)
516{
517 struct radeon_cs_chunk *relocs_chunk;
518 struct radeon_bo_list *reloc;
519 uint64_t start, end, offset;
520 unsigned idx;
521
522 relocs_chunk = p->chunk_relocs;
523 offset = radeon_get_ib_value(p, lo);
524 idx = radeon_get_ib_value(p, hi);
525
526 if (idx >= relocs_chunk->length_dw) {
527 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",__drm_err("Relocs at %d after relocations chunk end %d !\n", idx
, relocs_chunk->length_dw)
528 idx, relocs_chunk->length_dw)__drm_err("Relocs at %d after relocations chunk end %d !\n", idx
, relocs_chunk->length_dw)
;
529 return -EINVAL22;
530 }
531
532 reloc = &p->relocs[(idx / 4)];
533 start = reloc->gpu_offset;
534 end = start + radeon_bo_size(reloc->robj);
535 start += offset;
536
537 p->ib.ptr[lo] = start & 0xFFFFFFFF;
538 p->ib.ptr[hi] = start >> 32;
539
540 if (end <= start) {
541 DRM_ERROR("invalid reloc offset %llX!\n", offset)__drm_err("invalid reloc offset %llX!\n", offset);
542 return -EINVAL22;
543 }
544 if ((end - start) < size) {
545 DRM_ERROR("buffer to small (%d / %d)!\n",__drm_err("buffer to small (%d / %d)!\n", (unsigned)(end - start
), size)
546 (unsigned)(end - start), size)__drm_err("buffer to small (%d / %d)!\n", (unsigned)(end - start
), size)
;
547 return -EINVAL22;
548 }
549
550 return 0;
551}
552
553/**
554 * radeon_vce_validate_handle - validate stream handle
555 *
556 * @p: parser context
557 * @handle: handle to validate
558 * @allocated: allocated a new handle?
559 *
560 * Validates the handle and return the found session index or -EINVAL
561 * we we don't have another free session index.
562 */
563static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
564 uint32_t handle, bool_Bool *allocated)
565{
566 unsigned i;
567
568 *allocated = false0;
569
570 /* validate the handle */
571 for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i) {
572 if (atomic_read(&p->rdev->vce.handles[i])({ typeof(*(&p->rdev->vce.handles[i])) __tmp = *(volatile
typeof(*(&p->rdev->vce.handles[i])) *)&(*(&
p->rdev->vce.handles[i])); membar_datadep_consumer(); __tmp
; })
== handle) {
573 if (p->rdev->vce.filp[i] != p->filp) {
574 DRM_ERROR("VCE handle collision detected!\n")__drm_err("VCE handle collision detected!\n");
575 return -EINVAL22;
576 }
577 return i;
578 }
579 }
580
581 /* handle not found try to alloc a new one */
582 for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i) {
583 if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)__sync_val_compare_and_swap(&p->rdev->vce.handles[i
], 0, handle)
) {
584 p->rdev->vce.filp[i] = p->filp;
585 p->rdev->vce.img_size[i] = 0;
586 *allocated = true1;
587 return i;
588 }
589 }
590
591 DRM_ERROR("No more free VCE handles!\n")__drm_err("No more free VCE handles!\n");
592 return -EINVAL22;
593}
594
595/**
596 * radeon_vce_cs_parse - parse and validate the command stream
597 *
598 * @p: parser context
599 *
600 */
601int radeon_vce_cs_parse(struct radeon_cs_parser *p)
602{
603 int session_idx = -1;
604 bool_Bool destroyed = false0, created = false0, allocated = false0;
605 uint32_t tmp, handle = 0;
606 uint32_t *size = &tmp;
607 int i, r = 0;
608
609 while (p->idx < p->chunk_ib->length_dw) {
610 uint32_t len = radeon_get_ib_value(p, p->idx);
611 uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
612
613 if ((len < 8) || (len & 3)) {
614 DRM_ERROR("invalid VCE command length (%d)!\n", len)__drm_err("invalid VCE command length (%d)!\n", len);
615 r = -EINVAL22;
616 goto out;
617 }
618
619 if (destroyed) {
620 DRM_ERROR("No other command allowed after destroy!\n")__drm_err("No other command allowed after destroy!\n");
621 r = -EINVAL22;
622 goto out;
623 }
624
625 switch (cmd) {
626 case 0x00000001: // session
627 handle = radeon_get_ib_value(p, p->idx + 2);
628 session_idx = radeon_vce_validate_handle(p, handle,
629 &allocated);
630 if (session_idx < 0)
631 return session_idx;
632 size = &p->rdev->vce.img_size[session_idx];
633 break;
634
635 case 0x00000002: // task info
636 break;
637
638 case 0x01000001: // create
639 created = true1;
640 if (!allocated) {
641 DRM_ERROR("Handle already in use!\n")__drm_err("Handle already in use!\n");
642 r = -EINVAL22;
643 goto out;
644 }
645
646 *size = radeon_get_ib_value(p, p->idx + 8) *
647 radeon_get_ib_value(p, p->idx + 10) *
648 8 * 3 / 2;
649 break;
650
651 case 0x04000001: // config extension
652 case 0x04000002: // pic control
653 case 0x04000005: // rate control
654 case 0x04000007: // motion estimation
655 case 0x04000008: // rdo
656 case 0x04000009: // vui
657 break;
658
659 case 0x03000001: // encode
660 r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
661 *size);
662 if (r)
663 goto out;
664
665 r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
666 *size / 3);
667 if (r)
668 goto out;
669 break;
670
671 case 0x02000001: // destroy
672 destroyed = true1;
673 break;
674
675 case 0x05000001: // context buffer
676 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
677 *size * 2);
678 if (r)
679 goto out;
680 break;
681
682 case 0x05000004: // video bitstream buffer
683 tmp = radeon_get_ib_value(p, p->idx + 4);
684 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
685 tmp);
686 if (r)
687 goto out;
688 break;
689
690 case 0x05000005: // feedback buffer
691 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
692 4096);
693 if (r)
694 goto out;
695 break;
696
697 default:
698 DRM_ERROR("invalid VCE command (0x%x)!\n", cmd)__drm_err("invalid VCE command (0x%x)!\n", cmd);
699 r = -EINVAL22;
700 goto out;
701 }
702
703 if (session_idx == -1) {
704 DRM_ERROR("no session command at start of IB\n")__drm_err("no session command at start of IB\n");
705 r = -EINVAL22;
706 goto out;
707 }
708
709 p->idx += len / 4;
710 }
711
712 if (allocated && !created) {
713 DRM_ERROR("New session without create command!\n")__drm_err("New session without create command!\n");
714 r = -ENOENT2;
715 }
716
717out:
718 if ((!r && destroyed) || (r && allocated)) {
719 /*
720 * IB contains a destroy msg or we have allocated an
721 * handle and got an error, anyway free the handle
722 */
723 for (i = 0; i < RADEON_MAX_VCE_HANDLES16; ++i)
724 atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0)__sync_val_compare_and_swap(&p->rdev->vce.handles[i
], handle, 0)
;
725 }
726
727 return r;
728}
729
730/**
731 * radeon_vce_semaphore_emit - emit a semaphore command
732 *
733 * @rdev: radeon_device pointer
734 * @ring: engine to use
735 * @semaphore: address of semaphore
736 * @emit_wait: true=emit wait, false=emit signal
737 *
738 */
739bool_Bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
740 struct radeon_ring *ring,
741 struct radeon_semaphore *semaphore,
742 bool_Bool emit_wait)
743{
744 uint64_t addr = semaphore->gpu_addr;
745
746 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE)((__uint32_t)(0x00000006)));
747 radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF)((__uint32_t)((addr >> 3) & 0x000FFFFF)));
748 radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF)((__uint32_t)((addr >> 23) & 0x000FFFFF)));
749 radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0))((__uint32_t)(0x01003000 | (emit_wait ? 1 : 0))));
750 if (!emit_wait)
751 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)((__uint32_t)(0x00000001)));
752
753 return true1;
754}
755
756/**
757 * radeon_vce_ib_execute - execute indirect buffer
758 *
759 * @rdev: radeon_device pointer
760 * @ib: the IB to execute
761 *
762 */
763void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
764{
765 struct radeon_ring *ring = &rdev->ring[ib->ring];
766 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB)((__uint32_t)(0x00000002)));
767 radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr)((__uint32_t)(ib->gpu_addr)));
768 radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr))((__uint32_t)(((u32)(((ib->gpu_addr) >> 16) >>
16))))
);
769 radeon_ring_write(ring, cpu_to_le32(ib->length_dw)((__uint32_t)(ib->length_dw)));
770}
771
772/**
773 * radeon_vce_fence_emit - add a fence command to the ring
774 *
775 * @rdev: radeon_device pointer
776 * @fence: the fence
777 *
778 */
779void radeon_vce_fence_emit(struct radeon_device *rdev,
780 struct radeon_fence *fence)
781{
782 struct radeon_ring *ring = &rdev->ring[fence->ring];
783 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
784
785 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE)((__uint32_t)(0x00000003)));
786 radeon_ring_write(ring, cpu_to_le32(addr)((__uint32_t)(addr)));
787 radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr))((__uint32_t)(((u32)(((addr) >> 16) >> 16)))));
788 radeon_ring_write(ring, cpu_to_le32(fence->seq)((__uint32_t)(fence->seq)));
789 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP)((__uint32_t)(0x00000004)));
790 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)((__uint32_t)(0x00000001)));
791}
792
793/**
794 * radeon_vce_ring_test - test if VCE ring is working
795 *
796 * @rdev: radeon_device pointer
797 * @ring: the engine to test on
798 *
799 */
800int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
801{
802 uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
803 unsigned i;
804 int r;
805
806 r = radeon_ring_lock(rdev, ring, 16);
807 if (r) {
808 DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",__drm_err("radeon: vce failed to lock ring %d (%d).\n", ring->
idx, r)
809 ring->idx, r)__drm_err("radeon: vce failed to lock ring %d (%d).\n", ring->
idx, r)
;
810 return r;
811 }
812 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)((__uint32_t)(0x00000001)));
813 radeon_ring_unlock_commit(rdev, ring, false0);
814
815 for (i = 0; i < rdev->usec_timeout; i++) {
816 if (vce_v1_0_get_rptr(rdev, ring) != rptr)
817 break;
818 udelay(1);
819 }
820
821 if (i < rdev->usec_timeout) {
822 DRM_INFO("ring test on %d succeeded in %d usecs\n",printk("\0016" "[" "drm" "] " "ring test on %d succeeded in %d usecs\n"
, ring->idx, i)
823 ring->idx, i)printk("\0016" "[" "drm" "] " "ring test on %d succeeded in %d usecs\n"
, ring->idx, i)
;
824 } else {
825 DRM_ERROR("radeon: ring %d test failed\n",__drm_err("radeon: ring %d test failed\n", ring->idx)
826 ring->idx)__drm_err("radeon: ring %d test failed\n", ring->idx);
827 r = -ETIMEDOUT60;
828 }
829
830 return r;
831}
832
833/**
834 * radeon_vce_ib_test - test if VCE IBs are working
835 *
836 * @rdev: radeon_device pointer
837 * @ring: the engine to test on
838 *
839 */
840int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
841{
842 struct radeon_fence *fence = NULL((void *)0);
843 int r;
844
845 r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL((void *)0));
846 if (r) {
847 DRM_ERROR("radeon: failed to get create msg (%d).\n", r)__drm_err("radeon: failed to get create msg (%d).\n", r);
848 goto error;
849 }
850
851 r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
852 if (r) {
853 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r)__drm_err("radeon: failed to get destroy ib (%d).\n", r);
854 goto error;
855 }
856
857 r = radeon_fence_wait_timeout(fence, false0, usecs_to_jiffies((((uint64_t)(1000000)) * hz / 1000000)
858 RADEON_USEC_IB_TEST_TIMEOUT)(((uint64_t)(1000000)) * hz / 1000000));
859 if (r < 0) {
860 DRM_ERROR("radeon: fence wait failed (%d).\n", r)__drm_err("radeon: fence wait failed (%d).\n", r);
861 } else if (r == 0) {
862 DRM_ERROR("radeon: fence wait timed out.\n")__drm_err("radeon: fence wait timed out.\n");
863 r = -ETIMEDOUT60;
864 } else {
865 DRM_INFO("ib test on ring %d succeeded\n", ring->idx)printk("\0016" "[" "drm" "] " "ib test on ring %d succeeded\n"
, ring->idx)
;
866 r = 0;
867 }
868error:
869 radeon_fence_unref(&fence);
870 return r;
871}

/usr/src/sys/dev/pci/drm/include/linux/workqueue.h

1/* $OpenBSD: workqueue.h,v 1.6 2021/08/14 03:12:51 jsg Exp $ */
2/*
3 * Copyright (c) 2015 Mark Kettenis
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _LINUX_WORKQUEUE_H
19#define _LINUX_WORKQUEUE_H
20
21#include <sys/param.h>
22#include <sys/systm.h>
23#include <sys/task.h>
24#include <sys/timeout.h>
25#include <linux/bitops.h>
26#include <linux/atomic.h>
27#include <linux/rcupdate.h>
28#include <linux/kernel.h>
29#include <linux/lockdep.h>
30#include <linux/timer.h>
31
32struct workqueue_struct;
33
34extern struct workqueue_struct *system_wq;
35extern struct workqueue_struct *system_highpri_wq;
36extern struct workqueue_struct *system_unbound_wq;
37extern struct workqueue_struct *system_long_wq;
38
39#define WQ_HIGHPRI1 1
40#define WQ_FREEZABLE2 2
41#define WQ_UNBOUND4 4
42
43#define WQ_UNBOUND_MAX_ACTIVE4 4 /* matches nthreads in drm_linux.c */
44
45static inline struct workqueue_struct *
46alloc_workqueue(const char *name, int flags, int max_active)
47{
48 struct taskq *tq = taskq_create(name, 1, IPL_TTY0x9, 0);
49 return (struct workqueue_struct *)tq;
50}
51
52static inline struct workqueue_struct *
53alloc_ordered_workqueue(const char *name, int flags)
54{
55 struct taskq *tq = taskq_create(name, 1, IPL_TTY0x9, 0);
56 return (struct workqueue_struct *)tq;
57}
58
59static inline struct workqueue_struct *
60create_singlethread_workqueue(const char *name)
61{
62 struct taskq *tq = taskq_create(name, 1, IPL_TTY0x9, 0);
63 return (struct workqueue_struct *)tq;
64}
65
/* Tear down a workqueue by destroying the underlying taskq. */
static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
	taskq_destroy((struct taskq *)wq);
}
71
72struct work_struct {
73 struct task task;
74 struct taskq *tq;
75};
76
77typedef void (*work_func_t)(struct work_struct *);
78
79static inline void
80INIT_WORK(struct work_struct *work, work_func_t func)
81{
82 work->tq = NULL((void *)0);
83 task_set(&work->task, (void (*)(void *))func, work);
84}
85
86#define INIT_WORK_ONSTACK(x, y)INIT_WORK((x), (y)) INIT_WORK((x), (y))
87
88static inline bool_Bool
89queue_work(struct workqueue_struct *wq, struct work_struct *work)
90{
91 work->tq = (struct taskq *)wq;
92 return task_add(work->tq, &work->task);
93}
94
95static inline void
96cancel_work_sync(struct work_struct *work)
97{
98 if (work->tq != NULL((void *)0))
99 task_del(work->tq, &work->task);
100}
101
102#define work_pending(work)((&(work)->task)->t_flags & 1) task_pending(&(work)->task)((&(work)->task)->t_flags & 1)
103
104struct delayed_work {
105 struct work_struct work;
106 struct timeout to;
107 struct taskq *tq;
108};
109
110#define system_power_efficient_wq((struct workqueue_struct *)systq) ((struct workqueue_struct *)systq)
111
112static inline struct delayed_work *
113to_delayed_work(struct work_struct *work)
114{
115 return container_of(work, struct delayed_work, work)({ const __typeof( ((struct delayed_work *)0)->work ) *__mptr
= (work); (struct delayed_work *)( (char *)__mptr - __builtin_offsetof
(struct delayed_work, work) );})
;
116}
117
118static void
119__delayed_work_tick(void *arg)
120{
121 struct delayed_work *dwork = arg;
122
123 task_add(dwork->tq, &dwork->work.task);
124}
125
126static inline void
127INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func)
128{
129 INIT_WORK(&dwork->work, func);
130 timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
2
Value assigned to field 'family', which participates in a condition later
131}
132
133static inline void
134INIT_DELAYED_WORK_ONSTACK(struct delayed_work *dwork, work_func_t func)
135{
136 INIT_WORK(&dwork->work, func);
137 timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
138}
139
140static inline bool_Bool
141schedule_work(struct work_struct *work)
142{
143 work->tq = (struct taskq *)system_wq;
144 return task_add(work->tq, &work->task);
145}
146
147static inline bool_Bool
148schedule_delayed_work(struct delayed_work *dwork, int jiffies)
149{
150 dwork->tq = (struct taskq *)system_wq;
151 return timeout_add(&dwork->to, jiffies);
152}
153
154static inline bool_Bool
155queue_delayed_work(struct workqueue_struct *wq,
156 struct delayed_work *dwork, int jiffies)
157{
158 dwork->tq = (struct taskq *)wq;
159 return timeout_add(&dwork->to, jiffies);
160}
161
162static inline bool_Bool
163mod_delayed_work(struct workqueue_struct *wq,
164 struct delayed_work *dwork, int jiffies)
165{
166 dwork->tq = (struct taskq *)wq;
167 return (timeout_add(&dwork->to, jiffies) == 0);
168}
169
170static inline bool_Bool
171cancel_delayed_work(struct delayed_work *dwork)
172{
173 if (dwork->tq == NULL((void *)0))
174 return false0;
175 if (timeout_del(&dwork->to))
176 return true1;
177 return task_del(dwork->tq, &dwork->work.task);
178}
179
180static inline bool_Bool
181cancel_delayed_work_sync(struct delayed_work *dwork)
182{
183 if (dwork->tq == NULL((void *)0))
184 return false0;
185 if (timeout_del(&dwork->to))
186 return true1;
187 return task_del(dwork->tq, &dwork->work.task);
188}
189
190static inline bool_Bool
191delayed_work_pending(struct delayed_work *dwork)
192{
193 if (timeout_pending(&dwork->to)((&dwork->to)->to_flags & 0x02))
194 return true1;
195 return task_pending(&dwork->work.task)((&dwork->work.task)->t_flags & 1);
196}
197
198void flush_workqueue(struct workqueue_struct *);
199bool_Bool flush_work(struct work_struct *);
200bool_Bool flush_delayed_work(struct delayed_work *);
201#define flush_scheduled_work()flush_workqueue(system_wq) flush_workqueue(system_wq)
202#define drain_workqueue(x)flush_workqueue(x) flush_workqueue(x)
203
204static inline void
205destroy_work_on_stack(struct work_struct *work)
206{
207 if (work->tq)
208 task_del(work->tq, &work->task);
209}
210
211#define destroy_delayed_work_on_stack(x)
212
213struct rcu_work {
214 struct work_struct work;
215 struct rcu_head rcu;
216};
217
218static inline void
219INIT_RCU_WORK(struct rcu_work *work, work_func_t func)
220{
221 INIT_WORK(&work->work, func);
222}
223
224static inline bool_Bool
225queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *work)
226{
227 return queue_work(wq, &work->work);
228}
229
230#endif

/usr/src/sys/dev/pci/drm/include/linux/firmware.h

1/* Public domain. */
2
3#ifndef _LINUX_FIRMWARE_H
4#define _LINUX_FIRMWARE_H
5
6#include <sys/types.h>
7#include <sys/malloc.h>
8#include <sys/device.h>
9#include <linux/types.h>
10#include <linux/gfp.h>
11
12#ifndef __DECONST
13#define __DECONST(type, var)((type)(__uintptr_t)(const void *)(var)) ((type)(__uintptr_t)(const void *)(var))
14#endif
15
16struct firmware {
17 size_t size;
18 const u8 *data;
19};
20
21static inline int
22request_firmware(const struct firmware **fw, const char *name,
23 struct device *device)
24{
25 int r;
26 struct firmware *f = malloc(sizeof(struct firmware), M_DRM145,
27 M_WAITOK0x0001 | M_ZERO0x0008);
28 r = loadfirmware(name, __DECONST(u_char **, &f->data)((u_char **)(__uintptr_t)(const void *)(&f->data)), &f->size);
29 if (r != 0) {
7
Assuming 'r' is not equal to 0
8
Taking true branch
30 free(f, M_DRM145, sizeof(struct firmware));
31 *fw = NULL((void *)0);
9
Null pointer value stored to field 'vce_fw'
32 return -r;
33 } else {
34 *fw = f;
35 return 0;
36 }
37}
38
/* No uevent fallback exists here; "direct" is the only mode. */
static inline int
request_firmware_direct(const struct firmware **fw, const char *name,
    struct device *device)
{
	return request_firmware(fw, name, device);
}

/* Async loading is unsupported in this shim. */
#define request_firmware_nowait(a, b, c, d, e, f, g)	-EINVAL
47
48static inline void
49release_firmware(const struct firmware *fw)
50{
51 if (fw)
52 free(__DECONST(u_char *, fw->data)((u_char *)(__uintptr_t)(const void *)(fw->data)), M_DEVBUF2, fw->size);
53 free(__DECONST(struct firmware *, fw)((struct firmware *)(__uintptr_t)(const void *)(fw)), M_DRM145, sizeof(*fw));
54}
55
56#endif