Bug Summary

File: dev/pci/drm/amd/amdgpu/amdgpu_uvd.c
Warning: line 235, column 47
Access to field 'data' results in a dereference of a null pointer (loaded from field 'fw')
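In short, the path below enters amdgpu_uvd_sw_init(), steps into the OpenBSD request_firmware() shim from linux/firmware.h, which stores NULL to *fw when loadfirmware() fails (steps 7-9), then resumes in the caller under the assumption that the returned status is 0 (step 11) and reaches the adev->uvd.fw->data dereference at line 235. Note that on the failure path the shim returns -r with r nonzero, so the caller's if (r) check at line 217 would ordinarily reject it; both assumptions have to hold at once for the dereference to be reached.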

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name amdgpu_uvd.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/amd/amdgpu/amdgpu_uvd.c

/usr/src/sys/dev/pci/drm/amd/amdgpu/amdgpu_uvd.c

1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Christian König <deathsimple@vodafone.de>
29 */
30
31#include <linux/firmware.h>
32#include <linux/module.h>
33
34#include <drm/drm.h>
35
36#include "amdgpu.h"
37#include "amdgpu_pm.h"
38#include "amdgpu_uvd.h"
39#include "cikd.h"
40#include "uvd/uvd_4_2_d.h"
41
42#include "amdgpu_ras.h"
43
44/* 1 second timeout */
45#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
46
47/* Firmware versions for VI */
48#define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8))
49#define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8))
50#define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8))
51#define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8))
52
53/* Polaris10/11 firmware version */
54#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
55
56/* Firmware Names */
57#ifdef CONFIG_DRM_AMDGPU_SI
58#define FIRMWARE_TAHITI "amdgpu/tahiti_uvd.bin"
59#define FIRMWARE_VERDE "amdgpu/verde_uvd.bin"
60#define FIRMWARE_PITCAIRN "amdgpu/pitcairn_uvd.bin"
61#define FIRMWARE_OLAND "amdgpu/oland_uvd.bin"
62#endif
63#ifdef CONFIG_DRM_AMDGPU_CIK
64#define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin"
65#define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin"
66#define FIRMWARE_KAVERI "amdgpu/kaveri_uvd.bin"
67#define FIRMWARE_HAWAII "amdgpu/hawaii_uvd.bin"
68#define FIRMWARE_MULLINS "amdgpu/mullins_uvd.bin"
69#endif
70#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
71#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
72#define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin"
73#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
74#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
75#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
76#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
77#define FIRMWARE_VEGAM "amdgpu/vegam_uvd.bin"
78
79#define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin"
80#define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
81#define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin"
82
83/* These are common relative offsets for all asics, from uvd_7_0_offset.h, */
84#define UVD_GPCOM_VCPU_CMD 0x03c3
85#define UVD_GPCOM_VCPU_DATA0 0x03c4
86#define UVD_GPCOM_VCPU_DATA1 0x03c5
87#define UVD_NO_OP 0x03ff
88#define UVD_BASE_SI 0x3800
89
90/**
91 * amdgpu_uvd_cs_ctx - Command submission parser context
92 *
93 * Used for emulating virtual memory support on UVD 4.2.
94 */
95struct amdgpu_uvd_cs_ctx {
96 struct amdgpu_cs_parser *parser;
97 unsigned reg, count;
98 unsigned data0, data1;
99 unsigned idx;
100 unsigned ib_idx;
101
102 /* does the IB has a msg command */
103 bool has_msg_cmd;
104
105 /* minimum buffer sizes */
106 unsigned *buf_sizes;
107};
108
109#ifdef CONFIG_DRM_AMDGPU_SI
110MODULE_FIRMWARE(FIRMWARE_TAHITI);
111MODULE_FIRMWARE(FIRMWARE_VERDE);
112MODULE_FIRMWARE(FIRMWARE_PITCAIRN);
113MODULE_FIRMWARE(FIRMWARE_OLAND);
114#endif
115#ifdef CONFIG_DRM_AMDGPU_CIK
116MODULE_FIRMWARE(FIRMWARE_BONAIRE);
117MODULE_FIRMWARE(FIRMWARE_KABINI);
118MODULE_FIRMWARE(FIRMWARE_KAVERI);
119MODULE_FIRMWARE(FIRMWARE_HAWAII);
120MODULE_FIRMWARE(FIRMWARE_MULLINS);
121#endif
122MODULE_FIRMWARE(FIRMWARE_TONGA);
123MODULE_FIRMWARE(FIRMWARE_CARRIZO);
124MODULE_FIRMWARE(FIRMWARE_FIJI);
125MODULE_FIRMWARE(FIRMWARE_STONEY);
126MODULE_FIRMWARE(FIRMWARE_POLARIS10);
127MODULE_FIRMWARE(FIRMWARE_POLARIS11);
128MODULE_FIRMWARE(FIRMWARE_POLARIS12);
129MODULE_FIRMWARE(FIRMWARE_VEGAM);
130
131MODULE_FIRMWARE(FIRMWARE_VEGA10);
132MODULE_FIRMWARE(FIRMWARE_VEGA12);
133MODULE_FIRMWARE(FIRMWARE_VEGA20);
134
135static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
136
137int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
138{
139 unsigned long bo_size;
140 const char *fw_name;
141 const struct common_firmware_header *hdr;
142 unsigned family_id;
143 int i, j, r;
144
145 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
[1] Calling 'INIT_DELAYED_WORK'
[3] Returning from 'INIT_DELAYED_WORK'
146
147 switch (adev->asic_type) {
[4] Control jumps to 'case CHIP_VEGA20:' at line 209
148#ifdef CONFIG_DRM_AMDGPU_SI
149 case CHIP_TAHITI:
150 fw_name = FIRMWARE_TAHITI;
151 break;
152 case CHIP_VERDE:
153 fw_name = FIRMWARE_VERDE;
154 break;
155 case CHIP_PITCAIRN:
156 fw_name = FIRMWARE_PITCAIRN;
157 break;
158 case CHIP_OLAND:
159 fw_name = FIRMWARE_OLAND;
160 break;
161#endif
162#ifdef CONFIG_DRM_AMDGPU_CIK
163 case CHIP_BONAIRE:
164 fw_name = FIRMWARE_BONAIRE;
165 break;
166 case CHIP_KABINI:
167 fw_name = FIRMWARE_KABINI;
168 break;
169 case CHIP_KAVERI:
170 fw_name = FIRMWARE_KAVERI;
171 break;
172 case CHIP_HAWAII:
173 fw_name = FIRMWARE_HAWAII;
174 break;
175 case CHIP_MULLINS:
176 fw_name = FIRMWARE_MULLINS;
177 break;
178#endif
179 case CHIP_TONGA:
180 fw_name = FIRMWARE_TONGA;
181 break;
182 case CHIP_FIJI:
183 fw_name = FIRMWARE_FIJI;
184 break;
185 case CHIP_CARRIZO:
186 fw_name = FIRMWARE_CARRIZO;
187 break;
188 case CHIP_STONEY:
189 fw_name = FIRMWARE_STONEY;
190 break;
191 case CHIP_POLARIS10:
192 fw_name = FIRMWARE_POLARIS10;
193 break;
194 case CHIP_POLARIS11:
195 fw_name = FIRMWARE_POLARIS11;
196 break;
197 case CHIP_POLARIS12:
198 fw_name = FIRMWARE_POLARIS12;
199 break;
200 case CHIP_VEGA10:
201 fw_name = FIRMWARE_VEGA10;
202 break;
203 case CHIP_VEGA12:
204 fw_name = FIRMWARE_VEGA12;
205 break;
206 case CHIP_VEGAM:
207 fw_name = FIRMWARE_VEGAM;
208 break;
209 case CHIP_VEGA20:
210 fw_name = FIRMWARE_VEGA20;
211 break;
[5] Execution continues on line 216
212 default:
213 return -EINVAL;
214 }
215
216 r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
[6] Calling 'request_firmware'
[10] Returning from 'request_firmware'
217 if (r) {
[11] Assuming 'r' is 0
[12] Taking false branch
218 dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
219 fw_name);
220 return r;
221 }
222
223 r = amdgpu_ucode_validate(adev->uvd.fw);
224 if (r) {
[13] Assuming 'r' is 0
[14] Taking false branch
225 dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
226 fw_name);
227 release_firmware(adev->uvd.fw);
228 adev->uvd.fw = NULL;
229 return r;
230 }
231
232 /* Set the default UVD handles that the firmware can handle */
233 adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
234
235 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
[15] Access to field 'data' results in a dereference of a null pointer (loaded from field 'fw')
236 family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
237
238 if (adev->asic_type < CHIP_VEGA20) {
239 unsigned version_major, version_minor;
240
241 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
242 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
243 DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
244 version_major, version_minor, family_id);
245
246 /*
247 * Limit the number of UVD handles depending on microcode major
248 * and minor versions. The firmware version which has 40 UVD
249 * instances support is 1.80. So all subsequent versions should
250 * also have the same support.
251 */
252 if ((version_major > 0x01) ||
253 ((version_major == 0x01) && (version_minor >= 0x50)))
254 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
255
256 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
257 (family_id << 8));
258
259 if ((adev->asic_type == CHIP_POLARIS10 ||
260 adev->asic_type == CHIP_POLARIS11) &&
261 (adev->uvd.fw_version < FW_1_66_16))
262 DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
263 version_major, version_minor);
264 } else {
265 unsigned int enc_major, enc_minor, dec_minor;
266
267 dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
268 enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
269 enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
270 DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
271 enc_major, enc_minor, dec_minor, family_id);
272
273 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
274
275 adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
276 }
277
278 bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
279 + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
280 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
281 bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
282
283 for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
284 if (adev->uvd.harvest_config & (1 << j))
285 continue;
286 r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
287 AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
288 &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
289 if (r) {
290 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
291 return r;
292 }
293 }
294
295 for (i = 0; i < adev->uvd.max_handles; ++i) {
296 atomic_set(&adev->uvd.handles[i], 0);
297 adev->uvd.filp[i] = NULL;
298 }
299
300 /* from uvd v5.0 HW addressing capacity increased to 64 bits */
301 if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
302 adev->uvd.address_64_bit = true;
303
304 switch (adev->asic_type) {
305 case CHIP_TONGA:
306 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
307 break;
308 case CHIP_CARRIZO:
309 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
310 break;
311 case CHIP_FIJI:
312 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
313 break;
314 case CHIP_STONEY:
315 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
316 break;
317 default:
318 adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
319 }
320
321 return 0;
322}
323
324int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
325{
326 int i, j;
327
328 cancel_delayed_work_sync(&adev->uvd.idle_work);
329 drm_sched_entity_destroy(&adev->uvd.entity);
330
331 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
332 if (adev->uvd.harvest_config & (1 << j))
333 continue;
334 kvfree(adev->uvd.inst[j].saved_bo);
335
336 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
337 &adev->uvd.inst[j].gpu_addr,
338 (void **)&adev->uvd.inst[j].cpu_addr);
339
340 amdgpu_ring_fini(&adev->uvd.inst[j].ring);
341
342 for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
343 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
344 }
345 release_firmware(adev->uvd.fw);
346
347 return 0;
348}
349
350/**
351 * amdgpu_uvd_entity_init - init entity
352 *
353 * @adev: amdgpu_device pointer
354 *
355 */
356int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
357{
358 struct amdgpu_ring *ring;
359 struct drm_gpu_scheduler *sched;
360 int r;
361
362 ring = &adev->uvd.inst[0].ring;
363 sched = &ring->sched;
364 r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
365 &sched, 1, NULL);
366 if (r) {
367 DRM_ERROR("Failed setting up UVD kernel entity.\n");
368 return r;
369 }
370
371 return 0;
372}
373
374int amdgpu_uvd_suspend(struct amdgpu_device *adev)
375{
376 unsigned size;
377 void *ptr;
378 int i, j;
379 bool in_ras_intr = amdgpu_ras_intr_triggered();
380
381 cancel_delayed_work_sync(&adev->uvd.idle_work);
382
383 /* only valid for physical mode */
384 if (adev->asic_type < CHIP_POLARIS10) {
385 for (i = 0; i < adev->uvd.max_handles; ++i)
386 if (atomic_read(&adev->uvd.handles[i]))
387 break;
388
389 if (i == adev->uvd.max_handles)
390 return 0;
391 }
392
393 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
394 if (adev->uvd.harvest_config & (1 << j))
395 continue;
396 if (adev->uvd.inst[j].vcpu_bo == NULL)
397 continue;
398
399 size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
400 ptr = adev->uvd.inst[j].cpu_addr;
401
402 adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
403 if (!adev->uvd.inst[j].saved_bo)
404 return -ENOMEM;
405
406 /* re-write 0 since err_event_athub will corrupt VCPU buffer */
407 if (in_ras_intr)
408 memset(adev->uvd.inst[j].saved_bo, 0, size);
409 else
410 memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
411 }
412
413 if (in_ras_intr)
414 DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
415
416 return 0;
417}
418
419int amdgpu_uvd_resume(struct amdgpu_device *adev)
420{
421 unsigned size;
422 void *ptr;
423 int i;
424
425 for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
426 if (adev->uvd.harvest_config & (1 << i))
427 continue;
428 if (adev->uvd.inst[i].vcpu_bo == NULL)
429 return -EINVAL;
430
431 size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
432 ptr = adev->uvd.inst[i].cpu_addr;
433
434 if (adev->uvd.inst[i].saved_bo != NULL) {
435 memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
436 kvfree(adev->uvd.inst[i].saved_bo);
437 adev->uvd.inst[i].saved_bo = NULL;
438 } else {
439 const struct common_firmware_header *hdr;
440 unsigned offset;
441
442 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
443 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
444 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
445 memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
446 le32_to_cpu(hdr->ucode_size_bytes));
447 size -= le32_to_cpu(hdr->ucode_size_bytes);
448 ptr += le32_to_cpu(hdr->ucode_size_bytes);
449 }
450 memset_io(ptr, 0, size);
451 /* to restore uvd fence seq */
452 amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
453 }
454 }
455 return 0;
456}
457
458void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
459{
460 struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
461 int i, r;
462
463 for (i = 0; i < adev->uvd.max_handles; ++i) {
464 uint32_t handle = atomic_read(&adev->uvd.handles[i]);
465
466 if (handle != 0 && adev->uvd.filp[i] == filp) {
467 struct dma_fence *fence;
468
469 r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
470 &fence);
471 if (r) {
472 DRM_ERROR("Error destroying UVD %d!\n", r);
473 continue;
474 }
475
476 dma_fence_wait(fence, false);
477 dma_fence_put(fence);
478
479 adev->uvd.filp[i] = NULL;
480 atomic_set(&adev->uvd.handles[i], 0);
481 }
482 }
483}
484
485static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
486{
487 int i;
488 for (i = 0; i < abo->placement.num_placement; ++i) {
489 abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
490 abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
491 }
492}
493
494static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
495{
496 uint32_t lo, hi;
497 uint64_t addr;
498
499 lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
500 hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
501 addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
502
503 return addr;
504}
505
506/**
507 * amdgpu_uvd_cs_pass1 - first parsing round
508 *
509 * @ctx: UVD parser context
510 *
511 * Make sure UVD message and feedback buffers are in VRAM and
512 * nobody is violating an 256MB boundary.
513 */
514static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
515{
516 struct ttm_operation_ctx tctx = { false, false };
517 struct amdgpu_bo_va_mapping *mapping;
518 struct amdgpu_bo *bo;
519 uint32_t cmd;
520 uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
521 int r = 0;
522
523 r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
524 if (r) {
525 DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
526 return r;
527 }
528
529 if (!ctx->parser->adev->uvd.address_64_bit) {
530 /* check if it's a message or feedback command */
531 cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
532 if (cmd == 0x0 || cmd == 0x3) {
533 /* yes, force it into VRAM */
534 uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
535 amdgpu_bo_placement_from_domain(bo, domain);
536 }
537 amdgpu_uvd_force_into_uvd_segment(bo);
538
539 r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
540 }
541
542 return r;
543}
544
545/**
546 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
547 *
548 * @msg: pointer to message structure
549 * @buf_sizes: returned buffer sizes
550 *
551 * Peek into the decode message and calculate the necessary buffer sizes.
552 */
553static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
554 unsigned buf_sizes[])
555{
556 unsigned stream_type = msg[4];
557 unsigned width = msg[6];
558 unsigned height = msg[7];
559 unsigned dpb_size = msg[9];
560 unsigned pitch = msg[28];
561 unsigned level = msg[57];
562
563 unsigned width_in_mb = width / 16;
564 unsigned height_in_mb = roundup2(height / 16, 2);
565 unsigned fs_in_mb = width_in_mb * height_in_mb;
566
567 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
568 unsigned min_ctx_size = ~0;
569
570 image_size = width * height;
571 image_size += image_size / 2;
572 image_size = roundup2(image_size, 1024);
573
574 switch (stream_type) {
575 case 0: /* H264 */
576 switch(level) {
577 case 30:
578 num_dpb_buffer = 8100 / fs_in_mb;
579 break;
580 case 31:
581 num_dpb_buffer = 18000 / fs_in_mb;
582 break;
583 case 32:
584 num_dpb_buffer = 20480 / fs_in_mb;
585 break;
586 case 41:
587 num_dpb_buffer = 32768 / fs_in_mb;
588 break;
589 case 42:
590 num_dpb_buffer = 34816 / fs_in_mb;
591 break;
592 case 50:
593 num_dpb_buffer = 110400 / fs_in_mb;
594 break;
595 case 51:
596 num_dpb_buffer = 184320 / fs_in_mb;
597 break;
598 default:
599 num_dpb_buffer = 184320 / fs_in_mb;
600 break;
601 }
602 num_dpb_buffer++;
603 if (num_dpb_buffer > 17)
604 num_dpb_buffer = 17;
605
606 /* reference picture buffer */
607 min_dpb_size = image_size * num_dpb_buffer;
608
609 /* macroblock context buffer */
610 min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;
611
612 /* IT surface buffer */
613 min_dpb_size += width_in_mb * height_in_mb * 32;
614 break;
615
616 case 1: /* VC1 */
617
618 /* reference picture buffer */
619 min_dpb_size = image_size * 3;
620
621 /* CONTEXT_BUFFER */
622 min_dpb_size += width_in_mb * height_in_mb * 128;
623
624 /* IT surface buffer */
625 min_dpb_size += width_in_mb * 64;
626
627 /* DB surface buffer */
628 min_dpb_size += width_in_mb * 128;
629
630 /* BP */
631 tmp = max(width_in_mb, height_in_mb);
632 min_dpb_size += roundup2(tmp * 7 * 16, 64);
633 break;
634
635 case 3: /* MPEG2 */
636
637 /* reference picture buffer */
638 min_dpb_size = image_size * 3;
639 break;
640
641 case 4: /* MPEG4 */
642
643 /* reference picture buffer */
644 min_dpb_size = image_size * 3;
645
646 /* CM */
647 min_dpb_size += width_in_mb * height_in_mb * 64;
648
649 /* IT surface buffer */
650 min_dpb_size += roundup2(width_in_mb * height_in_mb * 32, 64);
651 break;
652
653 case 7: /* H264 Perf */
654 switch(level) {
655 case 30:
656 num_dpb_buffer = 8100 / fs_in_mb;
657 break;
658 case 31:
659 num_dpb_buffer = 18000 / fs_in_mb;
660 break;
661 case 32:
662 num_dpb_buffer = 20480 / fs_in_mb;
663 break;
664 case 41:
665 num_dpb_buffer = 32768 / fs_in_mb;
666 break;
667 case 42:
668 num_dpb_buffer = 34816 / fs_in_mb;
669 break;
670 case 50:
671 num_dpb_buffer = 110400 / fs_in_mb;
672 break;
673 case 51:
674 num_dpb_buffer = 184320 / fs_in_mb;
675 break;
676 default:
677 num_dpb_buffer = 184320 / fs_in_mb;
678 break;
679 }
680 num_dpb_buffer++;
681 if (num_dpb_buffer > 17)
682 num_dpb_buffer = 17;
683
684 /* reference picture buffer */
685 min_dpb_size = image_size * num_dpb_buffer;
686
687 if (!adev->uvd.use_ctx_buf){
688 /* macroblock context buffer */
689 min_dpb_size +=
690 width_in_mb * height_in_mb * num_dpb_buffer * 192;
691
692 /* IT surface buffer */
693 min_dpb_size += width_in_mb * height_in_mb * 32;
694 } else {
695 /* macroblock context buffer */
696 min_ctx_size =
697 width_in_mb * height_in_mb * num_dpb_buffer * 192;
698 }
699 break;
700
701 case 8: /* MJPEG */
702 min_dpb_size = 0;
703 break;
704
705 case 16: /* H265 */
706 image_size = (roundup2(width, 16) * roundup2(height, 16) * 3) / 2;
707 image_size = roundup2(image_size, 256);
708
709 num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
710 min_dpb_size = image_size * num_dpb_buffer;
711 min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
712 * 16 * num_dpb_buffer + 52 * 1024;
713 break;
714
715 default:
716 DRM_ERROR("UVD codec not handled %d!\n", stream_type);
717 return -EINVAL;
718 }
719
720 if (width > pitch) {
721 DRM_ERROR("Invalid UVD decoding target pitch!\n");
722 return -EINVAL;
723 }
724
725 if (dpb_size < min_dpb_size) {
726 DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
727 dpb_size, min_dpb_size);
728 return -EINVAL;
729 }
730
731 buf_sizes[0x1] = dpb_size;
732 buf_sizes[0x2] = image_size;
733 buf_sizes[0x4] = min_ctx_size;
734 /* store image width to adjust nb memory pstate */
735 adev->uvd.decode_image_width = width;
736 return 0;
737}
738
739/**
740 * amdgpu_uvd_cs_msg - handle UVD message
741 *
742 * @ctx: UVD parser context
743 * @bo: buffer object containing the message
744 * @offset: offset into the buffer object
745 *
746 * Peek into the UVD message and extract the session id.
747 * Make sure that we don't open up to many sessions.
748 */
749static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
750 struct amdgpu_bo *bo, unsigned offset)
751{
752 struct amdgpu_device *adev = ctx->parser->adev;
753 int32_t *msg, msg_type, handle;
754 void *ptr;
755 long r;
756 int i;
757
758 if (offset & 0x3F) {
759 DRM_ERROR("UVD messages must be 64 byte aligned!\n");
760 return -EINVAL;
761 }
762
763 r = amdgpu_bo_kmap(bo, &ptr);
764 if (r) {
765 DRM_ERROR("Failed mapping the UVD) message (%ld)!\n", r);
766 return r;
767 }
768
769 msg = ptr + offset;
770
771 msg_type = msg[1];
772 handle = msg[2];
773
774 if (handle == 0) {
775 DRM_ERROR("Invalid UVD handle!\n");
776 return -EINVAL;
777 }
778
779 switch (msg_type) {
780 case 0:
781 /* it's a create msg, calc image size (width * height) */
782 amdgpu_bo_kunmap(bo);
783
784 /* try to alloc a new handle */
785 for (i = 0; i < adev->uvd.max_handles; ++i) {
786 if (atomic_read(&adev->uvd.handles[i]) == handle) {
787 DRM_ERROR(")Handle 0x%x already in use!\n",
788 handle);
789 return -EINVAL;
790 }
791
792 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
793 adev->uvd.filp[i] = ctx->parser->filp;
794 return 0;
795 }
796 }
797
798 DRM_ERROR("No more free UVD handles!\n");
799 return -ENOSPC;
800
801 case 1:
802 /* it's a decode msg, calc buffer sizes */
803 r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
804 amdgpu_bo_kunmap(bo);
805 if (r)
806 return r;
807
808 /* validate the handle */
809 for (i = 0; i < adev->uvd.max_handles; ++i) {
810 if (atomic_read(&adev->uvd.handles[i]) == handle) {
811 if (adev->uvd.filp[i] != ctx->parser->filp) {
812 DRM_ERROR("UVD handle collision detected!\n");
813 return -EINVAL;
814 }
815 return 0;
816 }
817 }
818
819 DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
820 return -ENOENT;
821
822 case 2:
823 /* it's a destroy msg, free the handle */
824 for (i = 0; i < adev->uvd.max_handles; ++i)
825 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
826 amdgpu_bo_kunmap(bo);
827 return 0;
828
829 default:
830 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
831 return -EINVAL;
832 }
833 BUG();
834 return -EINVAL;
835}
836
837/**
838 * amdgpu_uvd_cs_pass2 - second parsing round
839 *
840 * @ctx: UVD parser context
841 *
842 * Patch buffer addresses, make sure buffer sizes are correct.
843 */
844static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
845{
846 struct amdgpu_bo_va_mapping *mapping;
847 struct amdgpu_bo *bo;
848 uint32_t cmd;
849 uint64_t start, end;
850 uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
851 int r;
852
853 r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
854 if (r) {
855 DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
856 return r;
857 }
858
859 start = amdgpu_bo_gpu_offset(bo);
860
861 end = (mapping->last + 1 - mapping->start);
862 end = end * AMDGPU_GPU_PAGE_SIZE + start;
863
864 addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
865 start += addr;
866
867 amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
868 lower_32_bits(start));
869 amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
870 upper_32_bits(start));
871
872 cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
873 if (cmd < 0x4) {
874 if ((end - start) < ctx->buf_sizes[cmd]) {
875 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
876 (unsigned)(end - start),
877 ctx->buf_sizes[cmd]);
878 return -EINVAL;
879 }
880
881 } else if (cmd == 0x206) {
882 if ((end - start) < ctx->buf_sizes[4]) {
883 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
884 (unsigned)(end - start),
885 ctx->buf_sizes[4]);
886 return -EINVAL;
887 }
888 } else if ((cmd != 0x100) && (cmd != 0x204)) {
889 DRM_ERROR("invalid UVD command %X!\n", cmd);
890 return -EINVAL;
891 }
892
893 if (!ctx->parser->adev->uvd.address_64_bit) {
894 if ((start >> 28) != ((end - 1) >> 28)) {
895 DRM_ERROR("reloc %llX-%llX crossing 256MB boundary!\n",
896 start, end);
897 return -EINVAL;
898 }
899
900 if ((cmd == 0 || cmd == 0x3) &&
901 (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
902 DRM_ERROR("msg/fb buffer %llX-%llX out of 256MB segment!\n",
903 start, end);
904 return -EINVAL;
905 }
906 }
907
908 if (cmd == 0) {
909 ctx->has_msg_cmd = true;
910 r = amdgpu_uvd_cs_msg(ctx, bo, addr);
911 if (r)
912 return r;
913 } else if (!ctx->has_msg_cmd) {
914 DRM_ERROR("Message needed before other commands are send!\n");
915 return -EINVAL;
916 }
917
918 return 0;
919}
920
921/**
922 * amdgpu_uvd_cs_reg - parse register writes
923 *
924 * @ctx: UVD parser context
925 * @cb: callback function
926 *
927 * Parse the register writes, call cb on each complete command.
928 */
929static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
930 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
931{
932 struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
933 int i, r;
934
935 ctx->idx++;
936 for (i = 0; i <= ctx->count; ++i) {
937 unsigned reg = ctx->reg + i;
938
939 if (ctx->idx >= ib->length_dw) {
940 DRM_ERROR("Register command after end of CS!\n");
941 return -EINVAL;
942 }
943
944 switch (reg) {
945 case mmUVD_GPCOM_VCPU_DATA0:
946 ctx->data0 = ctx->idx;
947 break;
948 case mmUVD_GPCOM_VCPU_DATA1:
949 ctx->data1 = ctx->idx;
950 break;
951 case mmUVD_GPCOM_VCPU_CMD:
952 r = cb(ctx);
953 if (r)
954 return r;
955 break;
956 case mmUVD_ENGINE_CNTL:
957 case mmUVD_NO_OP:
958 break;
959 default:
960 DRM_ERROR("Invalid reg 0x%X!\n", reg);
961 return -EINVAL;
962 }
963 ctx->idx++;
964 }
965 return 0;
966}
967
968/**
969 * amdgpu_uvd_cs_packets - parse UVD packets
970 *
971 * @ctx: UVD parser context
972 * @cb: callback function
973 *
974 * Parse the command stream packets.
975 */
976static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
977 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
978{
979 struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
980 int r;
981
982 for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
983 uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
984 unsigned type = CP_PACKET_GET_TYPE(cmd);
985 switch (type) {
986 case PACKET_TYPE0:
987 ctx->reg = CP_PACKET0_GET_REG(cmd);
988 ctx->count = CP_PACKET_GET_COUNT(cmd);
989 r = amdgpu_uvd_cs_reg(ctx, cb);
990 if (r)
991 return r;
992 break;
993 case PACKET_TYPE2:
994 ++ctx->idx;
995 break;
996 default:
997 DRM_ERROR("Unknown packet type %d !\n", type);
998 return -EINVAL;
999 }
1000 }
1001 return 0;
1002}
1003
1004/**
1005 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
1006 *
1007 * @parser: Command submission parser context
1008 *
1009 * Parse the command stream, patch in addresses as necessary.
1010 */
1011int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
1012{
1013 struct amdgpu_uvd_cs_ctx ctx = {};
1014 unsigned buf_sizes[] = {
1015 [0x00000000] = 2048,
1016 [0x00000001] = 0xFFFFFFFF,
1017 [0x00000002] = 0xFFFFFFFF,
1018 [0x00000003] = 2048,
1019 [0x00000004] = 0xFFFFFFFF,
1020 };
1021 struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
1022 int r;
1023
1024 parser->job->vm = NULL;
1025 ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
1026
1027 if (ib->length_dw % 16) {
1028 DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
1029 ib->length_dw);
1030 return -EINVAL;
1031 }
1032
1033 ctx.parser = parser;
1034 ctx.buf_sizes = buf_sizes;
1035 ctx.ib_idx = ib_idx;
1036
1037 /* first round only required on chips without UVD 64 bit address support */
1038 if (!parser->adev->uvd.address_64_bit) {
1039 /* first round, make sure the buffers are actually in the UVD segment */
1040 r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
1041 if (r)
1042 return r;
1043 }
1044
1045 /* second round, patch buffer addresses into the command stream */
1046 r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
1047 if (r)
1048 return r;
1049
1050 if (!ctx.has_msg_cmd) {
1051 DRM_ERROR("UVD-IBs need a msg command!\n");
1052 return -EINVAL;
1053 }
1054
1055 return 0;
1056}
1057
1058static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
1059 bool direct, struct dma_fence **fence)
1060{
1061 struct amdgpu_device *adev = ring->adev;
1062 struct dma_fence *f = NULL;
1063 struct amdgpu_job *job;
1064 struct amdgpu_ib *ib;
1065 uint32_t data[4];
1066 uint64_t addr;
1067 long r;
1068 int i;
1069 unsigned offset_idx = 0;
1070 unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
1071
1072 amdgpu_bo_kunmap(bo);
1073 amdgpu_bo_unpin(bo);
1074
1075 if (!ring->adev->uvd.address_64_bit) {
1076 struct ttm_operation_ctx ctx = { true, false };
1077
1078 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
1079 amdgpu_uvd_force_into_uvd_segment(bo);
1080 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1081 if (r)
1082 goto err;
1083 }
1084
1085 r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
1086 AMDGPU_IB_POOL_DELAYED, &job);
1087 if (r)
1088 goto err;
1089
1090 if (adev->asic_type >= CHIP_VEGA10) {
1091 offset_idx = 1 + ring->me;
1092 offset[1] = adev->reg_offset[UVD_HWIP][0][1];
1093 offset[2] = adev->reg_offset[UVD_HWIP][1][1];
1094 }
1095
1096 data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
1097 data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
1098 data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
1099 data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
1100
1101 ib = &job->ibs[0];
1102 addr = amdgpu_bo_gpu_offset(bo);
1103 ib->ptr[0] = data[0];
1104 ib->ptr[1] = addr;
1105 ib->ptr[2] = data[1];
1106 ib->ptr[3] = addr >> 32;
1107 ib->ptr[4] = data[2];
1108 ib->ptr[5] = 0;
1109 for (i = 6; i < 16; i += 2) {
1110 ib->ptr[i] = data[3];
1111 ib->ptr[i+1] = 0;
1112 }
1113 ib->length_dw = 16;
1114
1115 if (direct) {
1116 r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
1117 true, false,
1118 msecs_to_jiffies(10));
1119 if (r == 0)
1120 r = -ETIMEDOUT;
1121 if (r < 0)
1122 goto err_free;
1123
1124 r = amdgpu_job_submit_direct(job, ring, &f);
1125 if (r)
1126 goto err_free;
1127 } else {
1128 r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
1129 AMDGPU_SYNC_ALWAYS,
1130 AMDGPU_FENCE_OWNER_UNDEFINED);
1131 if (r)
1132 goto err_free;
1133
1134 r = amdgpu_job_submit(job, &adev->uvd.entity,
1135 AMDGPU_FENCE_OWNER_UNDEFINED, &f);
1136 if (r)
1137 goto err_free;
1138 }
1139
1140 amdgpu_bo_fence(bo, f, false);
1141 amdgpu_bo_unreserve(bo);
1142 amdgpu_bo_unref(&bo);
1143
1144 if (fence)
1145 *fence = dma_fence_get(f);
1146 dma_fence_put(f);
1147
1148 return 0;
1149
1150err_free:
1151 amdgpu_job_free(job);
1152
1153err:
1154 amdgpu_bo_unreserve(bo);
1155 amdgpu_bo_unref(&bo);
1156 return r;
1157}
1158
1159/* multiple fence commands without any stream commands in between can
1160 crash the vcpu so just try to emmit a dummy create/destroy msg to
1161 avoid this */
1162int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1163 struct dma_fence **fence)
1164{
1165 struct amdgpu_device *adev = ring->adev;
1166 struct amdgpu_bo *bo = NULL;
1167 uint32_t *msg;
1168 int r, i;
1169
1170 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
1171 AMDGPU_GEM_DOMAIN_VRAM,
1172 &bo, NULL, (void **)&msg);
1173 if (r)
1174 return r;
1175
1176 /* stitch together an UVD create msg */
1177 msg[0] = cpu_to_le32(0x00000de4);
1178 msg[1] = cpu_to_le32(0x00000000);
1179 msg[2] = cpu_to_le32(handle);
1180 msg[3] = cpu_to_le32(0x00000000);
1181 msg[4] = cpu_to_le32(0x00000000);
1182 msg[5] = cpu_to_le32(0x00000000);
1183 msg[6] = cpu_to_le32(0x00000000);
1184 msg[7] = cpu_to_le32(0x00000780);
1185 msg[8] = cpu_to_le32(0x00000440);
1186 msg[9] = cpu_to_le32(0x00000000);
1187 msg[10] = cpu_to_le32(0x01b37000);
1188 for (i = 11; i < 1024; ++i)
1189 msg[i] = cpu_to_le32(0x0);
1190
1191 return amdgpu_uvd_send_msg(ring, bo, true, fence);
1192}
1193
1194int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
1195 bool direct, struct dma_fence **fence)
1196{
1197 struct amdgpu_device *adev = ring->adev;
1198 struct amdgpu_bo *bo = NULL;
1199 uint32_t *msg;
1200 int r, i;
1201
1202 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
1203 AMDGPU_GEM_DOMAIN_VRAM,
1204 &bo, NULL, (void **)&msg);
1205 if (r)
1206 return r;
1207
1208 /* stitch together an UVD destroy msg */
1209 msg[0] = cpu_to_le32(0x00000de4);
1210 msg[1] = cpu_to_le32(0x00000002);
1211 msg[2] = cpu_to_le32(handle);
1212 msg[3] = cpu_to_le32(0x00000000);
1213 for (i = 4; i < 1024; ++i)
1214 msg[i] = cpu_to_le32(0x0);
1215
1216 return amdgpu_uvd_send_msg(ring, bo, direct, fence);
1217}
1218
1219static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
1220{
1221 struct amdgpu_device *adev =
1222 container_of(work, struct amdgpu_device, uvd.idle_work.work);
1223 unsigned fences = 0, i, j;
1224
1225 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1226 if (adev->uvd.harvest_config & (1 << i))
1227 continue;
1228 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
1229 for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
1230 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
1231 }
1232 }
1233
1234 if (fences == 0) {
1235 if (adev->pm.dpm_enabled) {
1236 amdgpu_dpm_enable_uvd(adev, false);
1237 } else {
1238 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
1239 /* shutdown the UVD block */
1240 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1241 AMD_PG_STATE_GATE);
1242 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1243 AMD_CG_STATE_GATE);
1244 }
1245 } else {
1246 schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1247 }
1248}
1249
1250void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
1251{
1252 struct amdgpu_device *adev = ring->adev;
1253 bool set_clocks;
1254
1255 if (amdgpu_sriov_vf(adev))
1256 return;
1257
1258 set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
1259 if (set_clocks) {
1260 if (adev->pm.dpm_enabled) {
1261 amdgpu_dpm_enable_uvd(adev, true);
1262 } else {
1263 amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
1264 amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1265 AMD_CG_STATE_UNGATE);
1266 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1267 AMD_PG_STATE_UNGATE);
1268 }
1269 }
1270}
1271
1272void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
1273{
1274 if (!amdgpu_sriov_vf(ring->adev))
1275 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1276}
1277
1278/**
1279 * amdgpu_uvd_ring_test_ib - test ib execution
1280 *
1281 * @ring: amdgpu_ring pointer
1282 *
1283 * Test if we can successfully execute an IB
1284 */
1285int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1286{
1287 struct dma_fence *fence;
1288 long r;
1289
1290 r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
1291 if (r)
1292 goto error;
1293
1294 r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
1295 if (r)
1296 goto error;
1297
1298 r = dma_fence_wait_timeout(fence, false, timeout);
1299 if (r == 0)
1300 r = -ETIMEDOUT;
1301 else if (r > 0)
1302 r = 0;
1303
1304 dma_fence_put(fence);
1305
1306error:
1307 return r;
1308}
1309
1310/**
1311 * amdgpu_uvd_used_handles - returns used UVD handles
1312 *
1313 * @adev: amdgpu_device pointer
1314 *
1315 * Returns the number of UVD handles in use
1316 */
1317uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
1318{
1319 unsigned i;
1320 uint32_t used_handles = 0;
1321
1322 for (i = 0; i < adev->uvd.max_handles; ++i) {
1323 /*
1324 * Handles can be freed in any order, and not
1325 * necessarily linear. So we need to count
1326 * all non-zero handles.
1327 */
1328 if (atomic_read(&adev->uvd.handles[i]))
1329 used_handles++;
1330 }
1331
1332 return used_handles;
1333}
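
If the warning is to be addressed at its point of use rather than by relying on the request_firmware() return-value contract, an explicit pointer check before the header dereference is enough. A minimal sketch of such a guard, as an illustrative patch against amdgpu_uvd_sw_init() above and not a change the maintainers have made:

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r || adev->uvd.fw == NULL) {
		/* request_firmware() stores NULL to adev->uvd.fw on failure,
		 * so testing the pointer as well as the status keeps any
		 * analyzer path from reaching the dereference below. */
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r ? r : -EINVAL;
	}
	/* ... firmware validation and max_handles setup unchanged ... */
	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;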

/usr/src/sys/dev/pci/drm/include/linux/workqueue.h

1/* $OpenBSD: workqueue.h,v 1.6 2021/08/14 03:12:51 jsg Exp $ */
2/*
3 * Copyright (c) 2015 Mark Kettenis
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _LINUX_WORKQUEUE_H
19#define _LINUX_WORKQUEUE_H
20
21#include <sys/param.h>
22#include <sys/systm.h>
23#include <sys/task.h>
24#include <sys/timeout.h>
25#include <linux/bitops.h>
26#include <linux/atomic.h>
27#include <linux/rcupdate.h>
28#include <linux/kernel.h>
29#include <linux/lockdep.h>
30#include <linux/timer.h>
31
32struct workqueue_struct;
33
34extern struct workqueue_struct *system_wq;
35extern struct workqueue_struct *system_highpri_wq;
36extern struct workqueue_struct *system_unbound_wq;
37extern struct workqueue_struct *system_long_wq;
38
39#define WQ_HIGHPRI 1
40#define WQ_FREEZABLE 2
41#define WQ_UNBOUND 4
42
43#define WQ_UNBOUND_MAX_ACTIVE 4 /* matches nthreads in drm_linux.c */
44
45static inline struct workqueue_struct *
46alloc_workqueue(const char *name, int flags, int max_active)
47{
48 struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
49 return (struct workqueue_struct *)tq;
50}
51
52static inline struct workqueue_struct *
53alloc_ordered_workqueue(const char *name, int flags)
54{
55 struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
56 return (struct workqueue_struct *)tq;
57}
58
59static inline struct workqueue_struct *
60create_singlethread_workqueue(const char *name)
61{
62 struct taskq *tq = taskq_create(name, 1, IPL_TTY, 0);
63 return (struct workqueue_struct *)tq;
64}
65
66static inline void
67destroy_workqueue(struct workqueue_struct *wq)
68{
69 taskq_destroy((struct taskq *)wq);
70}
71
72struct work_struct {
73 struct task task;
74 struct taskq *tq;
75};
76
77typedef void (*work_func_t)(struct work_struct *);
78
79static inline void
80INIT_WORK(struct work_struct *work, work_func_t func)
81{
82 work->tq = NULL;
83 task_set(&work->task, (void (*)(void *))func, work);
84}
85
86#define INIT_WORK_ONSTACK(x, y) INIT_WORK((x), (y))
87
88static inline bool
89queue_work(struct workqueue_struct *wq, struct work_struct *work)
90{
91 work->tq = (struct taskq *)wq;
92 return task_add(work->tq, &work->task);
93}
94
95static inline void
96cancel_work_sync(struct work_struct *work)
97{
98 if (work->tq != NULL)
99 task_del(work->tq, &work->task);
100}
101
102#define work_pending(work) task_pending(&(work)->task)
103
104struct delayed_work {
105 struct work_struct work;
106 struct timeout to;
107 struct taskq *tq;
108};
109
110#define system_power_efficient_wq ((struct workqueue_struct *)systq)
111
112static inline struct delayed_work *
113to_delayed_work(struct work_struct *work)
114{
115 return container_of(work, struct delayed_work, work);
117
118static void
119__delayed_work_tick(void *arg)
120{
121 struct delayed_work *dwork = arg;
122
123 task_add(dwork->tq, &dwork->work.task);
124}
125
126static inline void
127INIT_DELAYED_WORK(struct delayed_work *dwork, work_func_t func)
128{
129 INIT_WORK(&dwork->work, func);
130 timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
[2] Value assigned to field 'asic_type', which participates in a condition later
131}
132
133static inline void
134INIT_DELAYED_WORK_ONSTACK(struct delayed_work *dwork, work_func_t func)
135{
136 INIT_WORK(&dwork->work, func);
137 timeout_set(&dwork->to, __delayed_work_tick, &dwork->work);
138}
139
140static inline bool
141schedule_work(struct work_struct *work)
142{
143 work->tq = (struct taskq *)system_wq;
144 return task_add(work->tq, &work->task);
145}
146
147static inline bool
148schedule_delayed_work(struct delayed_work *dwork, int jiffies)
149{
150 dwork->tq = (struct taskq *)system_wq;
151 return timeout_add(&dwork->to, jiffies);
152}
153
154static inline bool
155queue_delayed_work(struct workqueue_struct *wq,
156 struct delayed_work *dwork, int jiffies)
157{
158 dwork->tq = (struct taskq *)wq;
159 return timeout_add(&dwork->to, jiffies);
160}
161
162static inline bool
163mod_delayed_work(struct workqueue_struct *wq,
164 struct delayed_work *dwork, int jiffies)
165{
166 dwork->tq = (struct taskq *)wq;
167 return (timeout_add(&dwork->to, jiffies) == 0);
168}
169
170static inline bool
171cancel_delayed_work(struct delayed_work *dwork)
172{
173 if (dwork->tq == NULL)
174 return false;
175 if (timeout_del(&dwork->to))
176 return true;
177 return task_del(dwork->tq, &dwork->work.task);
178}
179
180static inline bool
181cancel_delayed_work_sync(struct delayed_work *dwork)
182{
183 if (dwork->tq == NULL)
184 return false;
185 if (timeout_del(&dwork->to))
186 return true;
187 return task_del(dwork->tq, &dwork->work.task);
188}
189
190static inline bool
191delayed_work_pending(struct delayed_work *dwork)
192{
193 if (timeout_pending(&dwork->to))
194 return true;
195 return task_pending(&dwork->work.task);
196}
197
198void flush_workqueue(struct workqueue_struct *);
199bool flush_work(struct work_struct *);
200bool flush_delayed_work(struct delayed_work *);
201#define flush_scheduled_work() flush_workqueue(system_wq)
202#define drain_workqueue(x) flush_workqueue(x)
203
204static inline void
205destroy_work_on_stack(struct work_struct *work)
206{
207 if (work->tq)
208 task_del(work->tq, &work->task);
209}
210
211#define destroy_delayed_work_on_stack(x)
212
213struct rcu_work {
214 struct work_struct work;
215 struct rcu_head rcu;
216};
217
218static inline void
219INIT_RCU_WORK(struct rcu_work *work, work_func_t func)
220{
221 INIT_WORK(&work->work, func);
222}
223
224static inline bool
225queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *work)
226{
227 return queue_work(wq, &work->work);
228}
229
230#endif
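
For context, this shim maps INIT_DELAYED_WORK() onto task_set() plus timeout_set() and schedule_delayed_work() onto timeout_add(), so Linux-style callers such as the UVD idle handler run unchanged on OpenBSD tasks and timeouts. A minimal usage sketch, with hypothetical names and assuming only the functions defined above:

static struct delayed_work example_work;

static void
example_handler(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* the timeout fired, __delayed_work_tick() queued the task,
	 * and the task ran; re-arm for another tick */
	schedule_delayed_work(dwork, 100 /* jiffies */);
}

static void
example_start(void)
{
	INIT_DELAYED_WORK(&example_work, example_handler);
	schedule_delayed_work(&example_work, 100);
	/* cancel_delayed_work_sync() would delete the timeout first,
	 * then the task, mirroring the two-stage arming above */
}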

/usr/src/sys/dev/pci/drm/include/linux/firmware.h

1/* Public domain. */
2
3#ifndef _LINUX_FIRMWARE_H
4#define _LINUX_FIRMWARE_H
5
6#include <sys/types.h>
7#include <sys/malloc.h>
8#include <sys/device.h>
9#include <linux/types.h>
10#include <linux/gfp.h>
11
12#ifndef __DECONST
13#define __DECONST(type, var) ((type)(__uintptr_t)(const void *)(var))
14#endif
15
16struct firmware {
17 size_t size;
18 const u8 *data;
19};
20
21static inline int
22request_firmware(const struct firmware **fw, const char *name,
23 struct device *device)
24{
25 int r;
26 struct firmware *f = malloc(sizeof(struct firmware), M_DRM,
27 M_WAITOK | M_ZERO);
28 r = loadfirmware(name, __DECONST(u_char **, &f->data), &f->size);
29 if (r != 0) {
[7] Assuming 'r' is not equal to 0
[8] Taking true branch
30 free(f, M_DRM, sizeof(struct firmware));
31 *fw = NULL;
[9] Null pointer value stored to field 'fw'
32 return -r;
33 } else {
34 *fw = f;
35 return 0;
36 }
37}
38
39static inline int
40request_firmware_direct(const struct firmware **fw, const char *name,
41 struct device *device)
42{
43 return request_firmware(fw, name, device);
44}
45
46#define request_firmware_nowait(a, b, c, d, e, f, g) -EINVAL
47
48static inline void
49release_firmware(const struct firmware *fw)
50{
51 if (fw)
52 free(__DECONST(u_char *, fw->data), M_DEVBUF, fw->size);
53 free(__DECONST(struct firmware *, fw), M_DRM, sizeof(*fw));
54}
55
56#endif
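
Reading this shim against the report: on failure it stores NULL to *fw and returns -r with r nonzero, so a caller can only ever observe "status 0, *fw valid" or "status nonzero, *fw NULL". A caller-side sketch of that contract, illustrative only and assuming nothing beyond the functions above:

static int
firmware_contract_example(void)
{
	const struct firmware *fw;
	int r;

	r = request_firmware(&fw, "amdgpu/vega20_uvd.bin", NULL);
	if (r != 0)
		return r;	/* fw is NULL on this path; fw->data must not be touched */

	/* r == 0: fw, fw->data and fw->size stay valid until released */
	release_firmware(fw);
	return 0;
}

The flagged path in amdgpu_uvd_sw_init() needs "loadfirmware() failed" inside the shim and "returned status is 0" in the caller to hold simultaneously, which this contract appears to rule out; the dereference at line 235 is only reachable if that pairing were ever violated.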