Bug Summary

File: dev/pci/drm/amd/amdgpu/amdgpu_vcn.c
Warning: line 153, column 47
Access to field 'data' results in a dereference of a null pointer (loaded from field 'fw')
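
The flow behind the warning, in brief: on its error path request_firmware() stores NULL to *fw and returns nonzero, but the analyzer does not carry the link between the stored pointer and the return value back into the caller, so it also explores a path where the call "returns 0" while adev->vcn.fw is still NULL at the line 153 dereference. A minimal sketch of the pattern as the analyzer models it (hypothetical names, for illustration only):

	const struct firmware *fw;
	int r = request_firmware(&fw, fw_name, dev);	/* error path sets fw = NULL */
	if (r)
		return r;	/* step 11 below assumes r == 0 past this check */
	/* analyzer still allows fw == NULL on this path */
	hdr = (const struct common_firmware_header *)fw->data;	/* warning site */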

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name amdgpu_vcn.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/amd/amdgpu/amdgpu_vcn.c

/usr/src/sys/dev/pci/drm/amd/amdgpu/amdgpu_vcn.c

1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26
27#include <linux/firmware.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30
31#include "amdgpu.h"
32#include "amdgpu_pm.h"
33#include "amdgpu_vcn.h"
34#include "soc15d.h"
35
36/* Firmware Names */
37#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
38#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
39#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
40#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
41#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
42#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
43#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
44#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
45#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
46#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
47#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
48
49MODULE_FIRMWARE(FIRMWARE_RAVEN);
50MODULE_FIRMWARE(FIRMWARE_PICASSO);
51MODULE_FIRMWARE(FIRMWARE_RAVEN2);
52MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
53MODULE_FIRMWARE(FIRMWARE_RENOIR);
54MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
55MODULE_FIRMWARE(FIRMWARE_NAVI10);
56MODULE_FIRMWARE(FIRMWARE_NAVI14);
57MODULE_FIRMWARE(FIRMWARE_NAVI12);
58MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
59MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
60
61static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
62
63int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
64{
65 unsigned long bo_size;
66 const char *fw_name;
67 const struct common_firmware_header *hdr;
68 unsigned char fw_check;
69 int i, r;
70
71 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
72 rw_init(&adev->vcn.vcn_pg_lock, "vcnpg");
73 rw_init(&adev->vcn.vcn1_jpeg1_workaround, "vcnwa");
74 atomic_set(&adev->vcn.total_submission_cnt, 0);
75 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1
Assuming 'i' is >= field 'num_vcn_inst'
2
Loop condition is false. Execution continues on line 78
76 atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
77
78 switch (adev->asic_type) {
3
Control jumps to 'case CHIP_NAVY_FLOUNDER:' at line 127
79 case CHIP_RAVEN:
80 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
81 fw_name = FIRMWARE_RAVEN2;
82 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
83 fw_name = FIRMWARE_PICASSO;
84 else
85 fw_name = FIRMWARE_RAVEN;
86 break;
87 case CHIP_ARCTURUS:
88 fw_name = FIRMWARE_ARCTURUS;
89 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
90 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
91 adev->vcn.indirect_sram = true;
92 break;
93 case CHIP_RENOIR:
94 if (adev->apu_flags & AMD_APU_IS_RENOIR)
95 fw_name = FIRMWARE_RENOIR;
96 else
97 fw_name = FIRMWARE_GREEN_SARDINE;
98
99 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
100 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
101 adev->vcn.indirect_sram = true;
102 break;
103 case CHIP_NAVI10:
104 fw_name = FIRMWARE_NAVI10;
105 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
106 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
107 adev->vcn.indirect_sram = true;
108 break;
109 case CHIP_NAVI14:
110 fw_name = FIRMWARE_NAVI14;
111 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
112 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
113 adev->vcn.indirect_sram = true;
114 break;
115 case CHIP_NAVI12:
116 fw_name = FIRMWARE_NAVI12;
117 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
118 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
119 adev->vcn.indirect_sram = true;
120 break;
121 case CHIP_SIENNA_CICHLID:
122 fw_name = FIRMWARE_SIENNA_CICHLID;
123 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
124 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
125 adev->vcn.indirect_sram = true;
126 break;
127 case CHIP_NAVY_FLOUNDER:
128 fw_name = FIRMWARE_NAVY_FLOUNDER;
129 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
4
Assuming field 'load_type' is not equal to AMDGPU_FW_LOAD_PSP
130 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
131 adev->vcn.indirect_sram = true;
132 break;
5
Execution continues on line 137
133 default:
134 return -EINVAL;
135 }
136
137 r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
6
Calling 'request_firmware'
10
Returning from 'request_firmware'
138 if (r) {
11
Assuming 'r' is 0
12
Taking false branch
139 dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
140 fw_name);
141 return r;
142 }
143
144 r = amdgpu_ucode_validate(adev->vcn.fw);
145 if (r) {
13
Assuming 'r' is 0
14
Taking false branch
146 dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
147 fw_name);
148 release_firmware(adev->vcn.fw);
149 adev->vcn.fw = NULL;
150 return r;
151 }
152
153 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
15
Access to field 'data' results in a dereference of a null pointer (loaded from field 'fw')
154 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
155
156 /* Bit 20-23, it is encode major and non-zero for new naming convention.
157 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
158 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
159 * is zero in old naming convention, this field is always zero so far.
160 * These four bits are used to tell which naming convention is present.
161 */
162 fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
163 if (fw_check) {
164 unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
165
166 fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
167 enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
168 enc_major = fw_check;
169 dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
170 vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
171 DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
172 enc_major, enc_minor, dec_ver, vep, fw_rev);
173 } else {
174 unsigned int version_major, version_minor, family_id;
175
176 family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
177 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
178 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
179 DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
180 version_major, version_minor, family_id);
181 }
182
183 bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
184 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
185 bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
186 bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
187
188 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
189 if (adev->vcn.harvest_config & (1 << i))
190 continue;
191
192 r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
193 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
194 &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
195 if (r) {
196 dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
197 return r;
198 }
199
200 adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
201 bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
202 adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
203 bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
204
205 if (adev->vcn.indirect_sram) {
206 r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
207 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
208 &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
209 if (r) {
210 dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r)printf("drm:pid%d:%s *ERROR* " "VCN %d (%d) failed to allocate DPG bo\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , i, r)
;
211 return r;
212 }
213 }
214 }
215
216 return 0;
217}
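
Note on the function above: the request_firmware() error path is already handled by the "if (r)" check at line 138, so the warning at line 153 looks like a false positive. If silencing it is desired, one option is an explicit guard before the header dereference; a sketch of a possible hardening, not a committed fix:

	/* adev->vcn.fw should be non-NULL after a successful
	 * request_firmware(); make that invariant visible to the analyzer */
	if (adev->vcn.fw == NULL)
		return -EINVAL;
	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;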
218
219int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
220{
221 int i, j;
222
223 cancel_delayed_work_sync(&adev->vcn.idle_work);
224
225 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
226 if (adev->vcn.harvest_config & (1 << j))
227 continue;
228
229 if (adev->vcn.indirect_sram) {
230 amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
231 &adev->vcn.inst[j].dpg_sram_gpu_addr,
232 (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
233 }
234 kvfree(adev->vcn.inst[j].saved_bo);
235
236 amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
237 &adev->vcn.inst[j].gpu_addr,
238 (void **)&adev->vcn.inst[j].cpu_addr);
239
240 amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
241
242 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
243 amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
244 }
245
246 release_firmware(adev->vcn.fw);
247 mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
248 mutex_destroy(&adev->vcn.vcn_pg_lock);
249
250 return 0;
251}
252
253int amdgpu_vcn_suspend(struct amdgpu_device *adev)
254{
255 unsigned size;
256 void *ptr;
257 int i;
258
259 cancel_delayed_work_sync(&adev->vcn.idle_work);
260
261 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
262 if (adev->vcn.harvest_config & (1 << i))
263 continue;
264 if (adev->vcn.inst[i].vcpu_bo == NULL)
265 return 0;
266
267 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
268 ptr = adev->vcn.inst[i].cpu_addr;
269
270 adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
271 if (!adev->vcn.inst[i].saved_bo)
272 return -ENOMEM;
273
274 memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
275 }
276 return 0;
277}
278
279int amdgpu_vcn_resume(struct amdgpu_device *adev)
280{
281 unsigned size;
282 void *ptr;
283 int i;
284
285 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
286 if (adev->vcn.harvest_config & (1 << i))
287 continue;
288 if (adev->vcn.inst[i].vcpu_bo == NULL)
289 return -EINVAL;
290
291 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
292 ptr = adev->vcn.inst[i].cpu_addr;
293
294 if (adev->vcn.inst[i].saved_bo != NULL) {
295 memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
296 kvfree(adev->vcn.inst[i].saved_bo);
297 adev->vcn.inst[i].saved_bo = NULL;
298 } else {
299 const struct common_firmware_header *hdr;
300 unsigned offset;
301
302 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
303 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
304 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
305 memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
306 le32_to_cpu(hdr->ucode_size_bytes));
307 size -= le32_to_cpu(hdr->ucode_size_bytes);
308 ptr += le32_to_cpu(hdr->ucode_size_bytes);
309 }
310 memset_io(ptr, 0, size);
311 }
312 }
313 return 0;
314}
315
316static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
317{
318 struct amdgpu_device *adev =
319 container_of(work, struct amdgpu_device, vcn.idle_work.work);
320 unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
321 unsigned int i, j;
322
323 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
324 if (adev->vcn.harvest_config & (1 << j))
325 continue;
326
327 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
328 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
329 }
330
331 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
332 struct dpg_pause_state new_state;
333
334 if (fence[j] ||
335 unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
336 new_state.fw_based = VCN_DPG_STATE__PAUSE;
337 else
338 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
339
340 adev->vcn.pause_dpg_mode(adev, j, &new_state);
341 }
342
343 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
344 fences += fence[j];
345 }
346
347 if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
348 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
349 AMD_PG_STATE_GATE);
350 } else {
351 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
352 }
353}
354
355void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
356{
357 struct amdgpu_device *adev = ring->adev;
358
359 atomic_inc(&adev->vcn.total_submission_cnt);
360 cancel_delayed_work_sync(&adev->vcn.idle_work);
361
362 mutex_lock(&adev->vcn.vcn_pg_lock);
363 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
364 AMD_PG_STATE_UNGATE);
365
366 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
367 struct dpg_pause_state new_state;
368
369 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
370 atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
371 new_state.fw_based = VCN_DPG_STATE__PAUSE;
372 } else {
373 unsigned int fences = 0;
374 unsigned int i;
375
376 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
377 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
378
379 if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
380 new_state.fw_based = VCN_DPG_STATE__PAUSE;
381 else
382 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
383 }
384
385 adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
386 }
387 mutex_unlock(&adev->vcn.vcn_pg_lock);
388}
389
390void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
391{
392 if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
393 ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
394 atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
395
396 atomic_dec(&ring->adev->vcn.total_submission_cnt);
397
398 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
399}
400
401int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
402{
403 struct amdgpu_device *adev = ring->adev;
404 uint32_t tmp = 0;
405 unsigned i;
406 int r;
407
408 /* VCN in SRIOV does not support direct register read/write */
409 if (amdgpu_sriov_vf(adev))
410 return 0;
411
412 WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
413 r = amdgpu_ring_alloc(ring, 3);
414 if (r)
415 return r;
416 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
417 amdgpu_ring_write(ring, 0xDEADBEEF);
418 amdgpu_ring_commit(ring);
419 for (i = 0; i < adev->usec_timeout; i++) {
420 tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
421 if (tmp == 0xDEADBEEF)
422 break;
423 udelay(1);
424 }
425
426 if (i >= adev->usec_timeout)
427 r = -ETIMEDOUT;
428
429 return r;
430}
431
432static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
433 struct amdgpu_bo *bo,
434 struct dma_fence **fence)
435{
436 struct amdgpu_device *adev = ring->adev;
437 struct dma_fence *f = NULL;
438 struct amdgpu_job *job;
439 struct amdgpu_ib *ib;
440 uint64_t addr;
441 int i, r;
442
443 r = amdgpu_job_alloc_with_ib(adev, 64,
444 AMDGPU_IB_POOL_DIRECT, &job);
445 if (r)
446 goto err;
447
448 ib = &job->ibs[0];
449 addr = amdgpu_bo_gpu_offset(bo);
450 ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
451 ib->ptr[1] = addr;
452 ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
453 ib->ptr[3] = addr >> 32;
454 ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
455 ib->ptr[5] = 0;
456 for (i = 6; i < 16; i += 2) {
457 ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
458 ib->ptr[i+1] = 0;
459 }
460 ib->length_dw = 16;
461
462 r = amdgpu_job_submit_direct(job, ring, &f);
463 if (r)
464 goto err_free;
465
466 amdgpu_bo_fence(bo, f, false);
467 amdgpu_bo_unreserve(bo);
468 amdgpu_bo_unref(&bo);
469
470 if (fence)
471 *fence = dma_fence_get(f);
472 dma_fence_put(f);
473
474 return 0;
475
476err_free:
477 amdgpu_job_free(job);
478
479err:
480 amdgpu_bo_unreserve(bo);
481 amdgpu_bo_unref(&bo);
482 return r;
483}
484
485static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
486 struct dma_fence **fence)
487{
488 struct amdgpu_device *adev = ring->adev;
489 struct amdgpu_bo *bo = NULL;
490 uint32_t *msg;
491 int r, i;
492
493 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
494 AMDGPU_GEM_DOMAIN_VRAM,
495 &bo, NULL, (void **)&msg);
496 if (r)
497 return r;
498
499 msg[0] = cpu_to_le32(0x00000028);
500 msg[1] = cpu_to_le32(0x00000038);
501 msg[2] = cpu_to_le32(0x00000001);
502 msg[3] = cpu_to_le32(0x00000000);
503 msg[4] = cpu_to_le32(handle);
504 msg[5] = cpu_to_le32(0x00000000);
505 msg[6] = cpu_to_le32(0x00000001);
506 msg[7] = cpu_to_le32(0x00000028);
507 msg[8] = cpu_to_le32(0x00000010);
508 msg[9] = cpu_to_le32(0x00000000);
509 msg[10] = cpu_to_le32(0x00000007);
510 msg[11] = cpu_to_le32(0x00000000);
511 msg[12] = cpu_to_le32(0x00000780);
512 msg[13] = cpu_to_le32(0x00000440);
513 for (i = 14; i < 1024; ++i)
514 msg[i] = cpu_to_le32(0x0);
515
516 return amdgpu_vcn_dec_send_msg(ring, bo, fence);
517}
518
519static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
520 struct dma_fence **fence)
521{
522 struct amdgpu_device *adev = ring->adev;
523 struct amdgpu_bo *bo = NULL;
524 uint32_t *msg;
525 int r, i;
526
527 r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
528 AMDGPU_GEM_DOMAIN_VRAM,
529 &bo, NULL, (void **)&msg);
530 if (r)
531 return r;
532
533 msg[0] = cpu_to_le32(0x00000028);
534 msg[1] = cpu_to_le32(0x00000018);
535 msg[2] = cpu_to_le32(0x00000000);
536 msg[3] = cpu_to_le32(0x00000002);
537 msg[4] = cpu_to_le32(handle);
538 msg[5] = cpu_to_le32(0x00000000);
539 for (i = 6; i < 1024; ++i)
540 msg[i] = cpu_to_le32(0x0);
541
542 return amdgpu_vcn_dec_send_msg(ring, bo, fence);
543}
544
545int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
546{
547 struct dma_fence *fence;
548 long r;
549
550 r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
551 if (r)
552 goto error;
553
554 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
555 if (r)
556 goto error;
557
558 r = dma_fence_wait_timeout(fence, false, timeout);
559 if (r == 0)
560 r = -ETIMEDOUT;
561 else if (r > 0)
562 r = 0;
563
564 dma_fence_put(fence);
565error:
566 return r;
567}
568
569int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
570{
571 struct amdgpu_device *adev = ring->adev;
572 uint32_t rptr;
573 unsigned i;
574 int r;
575
576 if (amdgpu_sriov_vf(adev))
577 return 0;
578
579 r = amdgpu_ring_alloc(ring, 16);
580 if (r)
581 return r;
582
583 rptr = amdgpu_ring_get_rptr(ring);
584
585 amdgpu_ring_write(ring, VCN_ENC_CMD_END);
586 amdgpu_ring_commit(ring);
587
588 for (i = 0; i < adev->usec_timeout; i++) {
589 if (amdgpu_ring_get_rptr(ring) != rptr)
590 break;
591 udelay(1);
592 }
593
594 if (i >= adev->usec_timeout)
595 r = -ETIMEDOUT;
596
597 return r;
598}
599
600static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
601 struct amdgpu_bo *bo,
602 struct dma_fence **fence)
603{
604 const unsigned ib_size_dw = 16;
605 struct amdgpu_job *job;
606 struct amdgpu_ib *ib;
607 struct dma_fence *f = NULL;
608 uint64_t addr;
609 int i, r;
610
611 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
612 AMDGPU_IB_POOL_DIRECT, &job);
613 if (r)
614 return r;
615
616 ib = &job->ibs[0];
617 addr = amdgpu_bo_gpu_offset(bo);
618
619 ib->length_dw = 0;
620 ib->ptr[ib->length_dw++] = 0x00000018;
621 ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
622 ib->ptr[ib->length_dw++] = handle;
623 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
624 ib->ptr[ib->length_dw++] = addr;
625 ib->ptr[ib->length_dw++] = 0x0000000b;
626
627 ib->ptr[ib->length_dw++] = 0x00000014;
628 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
629 ib->ptr[ib->length_dw++] = 0x0000001c;
630 ib->ptr[ib->length_dw++] = 0x00000000;
631 ib->ptr[ib->length_dw++] = 0x00000000;
632
633 ib->ptr[ib->length_dw++] = 0x00000008;
634 ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
635
636 for (i = ib->length_dw; i < ib_size_dw; ++i)
637 ib->ptr[i] = 0x0;
638
639 r = amdgpu_job_submit_direct(job, ring, &f);
640 if (r)
641 goto err;
642
643 if (fence)
644 *fence = dma_fence_get(f);
645 dma_fence_put(f);
646
647 return 0;
648
649err:
650 amdgpu_job_free(job);
651 return r;
652}
653
654static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
655 struct amdgpu_bo *bo,
656 struct dma_fence **fence)
657{
658 const unsigned ib_size_dw = 16;
659 struct amdgpu_job *job;
660 struct amdgpu_ib *ib;
661 struct dma_fence *f = NULL;
662 uint64_t addr;
663 int i, r;
664
665 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
666 AMDGPU_IB_POOL_DIRECT, &job);
667 if (r)
668 return r;
669
670 ib = &job->ibs[0];
671 addr = amdgpu_bo_gpu_offset(bo);
672
673 ib->length_dw = 0;
674 ib->ptr[ib->length_dw++] = 0x00000018;
675 ib->ptr[ib->length_dw++] = 0x00000001;
676 ib->ptr[ib->length_dw++] = handle;
677 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
678 ib->ptr[ib->length_dw++] = addr;
679 ib->ptr[ib->length_dw++] = 0x0000000b;
680
681 ib->ptr[ib->length_dw++] = 0x00000014;
682 ib->ptr[ib->length_dw++] = 0x00000002;
683 ib->ptr[ib->length_dw++] = 0x0000001c;
684 ib->ptr[ib->length_dw++] = 0x00000000;
685 ib->ptr[ib->length_dw++] = 0x00000000;
686
687 ib->ptr[ib->length_dw++] = 0x00000008;
688 ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
689
690 for (i = ib->length_dw; i < ib_size_dw; ++i)
691 ib->ptr[i] = 0x0;
692
693 r = amdgpu_job_submit_direct(job, ring, &f);
694 if (r)
695 goto err;
696
697 if (fence)
698 *fence = dma_fence_get(f);
699 dma_fence_put(f);
700
701 return 0;
702
703err:
704 amdgpu_job_free(job);
705 return r;
706}
707
708int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
709{
710 struct dma_fence *fence = NULL;
711 struct amdgpu_bo *bo = NULL;
712 long r;
713
714 r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
715 AMDGPU_GEM_DOMAIN_VRAM,
716 &bo, NULL, NULL);
717 if (r)
718 return r;
719
720 r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
721 if (r)
722 goto error;
723
724 r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
725 if (r)
726 goto error;
727
728 r = dma_fence_wait_timeout(fence, false, timeout);
729 if (r == 0)
730 r = -ETIMEDOUT;
731 else if (r > 0)
732 r = 0;
733
734error:
735 dma_fence_put(fence);
736 amdgpu_bo_unreserve(bo);
737 amdgpu_bo_unref(&bo);
738 return r;
739}

/usr/src/sys/dev/pci/drm/include/linux/firmware.h

1/* Public domain. */
2
3#ifndef _LINUX_FIRMWARE_H
4#define _LINUX_FIRMWARE_H
5
6#include <sys/types.h>
7#include <sys/malloc.h>
8#include <sys/device.h>
9#include <linux/types.h>
10#include <linux/gfp.h>
11
12#ifndef __DECONST
13#define __DECONST(type, var) ((type)(__uintptr_t)(const void *)(var))
14#endif
15
16struct firmware {
17 size_t size;
18 const u8 *data;
19};
20
21static inline int
22request_firmware(const struct firmware **fw, const char *name,
23 struct device *device)
24{
25 int r;
26 struct firmware *f = malloc(sizeof(struct firmware), M_DRM,
27 M_WAITOK | M_ZERO);
28 r = loadfirmware(name, __DECONST(u_char **, &f->data), &f->size);
29 if (r != 0) {
7
Assuming 'r' is not equal to 0
8
Taking true branch
30 free(f, M_DRM, sizeof(struct firmware));
31 *fw = NULL;
9
Null pointer value stored to field 'fw'
32 return -r;
33 } else {
34 *fw = f;
35 return 0;
36 }
37}
38
39static inline int
40request_firmware_direct(const struct firmware **fw, const char *name,
41 struct device *device)
42{
43 return request_firmware(fw, name, device);
44}
45
46#define request_firmware_nowait(a, b, c, d, e, f, g) -EINVAL
47
48static inline void
49release_firmware(const struct firmware *fw)
50{
51 if (fw)
52 free(__DECONST(u_char *, fw->data), M_DEVBUF, fw->size);
53 free(__DECONST(struct firmware *, fw), M_DRM, sizeof(*fw));
54}
55
56#endif
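
The shim above shows why the reported path is infeasible in practice: loadfirmware() returns 0 only on success, so *fw is set to NULL exactly when request_firmware() returns nonzero, and the caller's "if (r)" check already covers that case; the analyzer simply loses the relation between the two assignments at steps 9 through 11. One way to state the invariant directly in the shim, sketched under the assumption that a provably nonzero, negated error return is acceptable here:

	static inline int
	request_firmware(const struct firmware **fw, const char *name,
	    struct device *device)
	{
		struct firmware *f = malloc(sizeof(struct firmware), M_DRM,
		    M_WAITOK | M_ZERO);
		int r = loadfirmware(name, __DECONST(u_char **, &f->data),
		    &f->size);
		if (r == 0) {
			*fw = f;
			return 0;
		}
		free(f, M_DRM, sizeof(struct firmware));
		*fw = NULL;
		return r > 0 ? -r : -EINVAL;	/* provably nonzero on this path */
	}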