Bug Summary

File: dev/pci/drm/i915/gem/i915_gem_phys.c
Warning: line 138, column 13 — value stored to 'dma' during its initialization is never read

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name i915_gem_phys.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I 
/usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c 
/usr/src/sys/dev/pci/drm/i915/gem/i915_gem_phys.c
1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2014-2016 Intel Corporation
5 */
6
7#include <linux/highmem.h>
8#include <linux/shmem_fs.h>
9#include <linux/swap.h>
10
11#include <drm/drm_cache.h>
12#include <drm/drm_legacy.h> /* for drm_dmamem_alloc() */
13
14#include "gt/intel_gt.h"
15#include "i915_drv.h"
16#include "i915_gem_object.h"
17#include "i915_gem_region.h"
18#include "i915_gem_tiling.h"
19#include "i915_scatterlist.h"
20
/*
 * i915_gem_object_get_pages_phys() - replace a shmem/uao-backed object's
 * pages with one physically contiguous, coherent DMA allocation and copy
 * the object's current contents into it.  Returns 0 or a negative errno.
 * (Analyzer view: macro expansions appear fused inline after each macro.)
 */
21static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
22{
23#ifdef __linux__
24 struct address_space *mapping = obj->base.filp->f_mapping;
25#else
/* OpenBSD: bus_dma bookkeeping handle and allocation flags instead. */
26 struct drm_dmamem *dmah;
27 int flags = 0;
28#endif
29 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
30 struct scatterlist *sg;
31 struct sg_table *st;
32 dma_addr_t dma;
33 void *vaddr;
34 void *dst;
35 int i;

/* Bit-17 swizzled objects cannot be flattened into one linear buffer. */
37 if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))({ __builtin_expect(!!(!!(i915_gem_object_needs_bit17_swizzle
(obj))), 0); })
)
38 return -EINVAL22;

40 /*
41 * Always aligning to the object size, allows a single allocation
42 * to handle all possible callers, and given typical object sizes,
43 * the alignment of the buddy allocation will naturally match.
44 */
45#ifdef __linux__
46 vaddr = dma_alloc_coherent(obj->base.dev->dev,
47 roundup_pow_of_two(obj->base.size),
48 &dma, GFP_KERNEL(0x0001 | 0x0004));
49 if (!vaddr)
50 return -ENOMEM12;
51#else
/* Single segment, object-size aligned, mirroring dma_alloc_coherent(). */
52 dmah = drm_dmamem_alloc(i915->dmat,
53 roundup_pow_of_two(obj->base.size),
54 PAGE_SIZE(1 << 12), 1,
55 roundup_pow_of_two(obj->base.size), flags, 0);
56 if (dmah == NULL((void *)0))
57 return -ENOMEM12;
58 dma = dmah->map->dm_segs[0].ds_addr;
59 vaddr = dmah->kva;
60#endif

/* One-entry sg_table describing the whole contiguous allocation. */
62 st = kmalloc(sizeof(*st), GFP_KERNEL(0x0001 | 0x0004));
63 if (!st)
64 goto err_pci;

66 if (sg_alloc_table(st, 1, GFP_KERNEL(0x0001 | 0x0004)))
67 goto err_st;

69 sg = st->sgl;
70 sg->offset = 0;
71 sg->length = obj->base.size;

/*
 * The sg "page" slot is abused to stash the allocation handle so that
 * put_pages/pread/pwrite can recover it later: the kernel vaddr on
 * Linux, the drm_dmamem bookkeeping struct on OpenBSD.
 */
73#ifdef __linux__
74 sg_assign_page(sg, (struct page *)vaddr);
75#else
76 sg_assign_page(sg, (struct vm_page *)dmah);
77#endif
78 sg_dma_address(sg)((sg)->dma_address) = dma;
79 sg_dma_len(sg)((sg)->length) = obj->base.size;

/* Copy the existing backing pages into the flat buffer, page by page. */
81 dst = vaddr;
82 for (i = 0; i < obj->base.size / PAGE_SIZE(1 << 12); i++) {
83 struct vm_page *page;
84 void *src;

86#ifdef __linux__
87 page = shmem_read_mapping_page(mapping, i);
88 if (IS_ERR(page))
89 goto err_st;
90#else
/* Wire one page of the uvm aobj so it can be kmapped and copied. */
91 struct pglist plist;
92 TAILQ_INIT(&plist)do { (&plist)->tqh_first = ((void *)0); (&plist)->
tqh_last = &(&plist)->tqh_first; } while (0)
;
93 if (uvm_obj_wire(obj->base.uao, i * PAGE_SIZE(1 << 12),
94 (i + 1) * PAGE_SIZE(1 << 12), &plist))
95 goto err_st;
96 page = TAILQ_FIRST(&plist)((&plist)->tqh_first);
97#endif

99 src = kmap_atomic(page);
100 memcpy(dst, src, PAGE_SIZE)__builtin_memcpy((dst), (src), ((1 << 12)));
/* Flush each destination page so the GPU sees the copied data. */
101 drm_clflush_virt_range(dst, PAGE_SIZE(1 << 12));
102 kunmap_atomic(src);

104#ifdef __linux__
105 put_page(page);
106#else
107 uvm_obj_unwire(obj->base.uao, i * PAGE_SIZE(1 << 12),
108 (i + 1) * PAGE_SIZE(1 << 12));
109#endif
110 dst += PAGE_SIZE(1 << 12);
111 }

113 intel_gt_chipset_flush(to_gt(i915));

115 /* We're no longer struct page backed */
116 obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE(1UL << (0));
117 __i915_gem_object_set_pages(obj, st, sg->length);

119 return 0;

/*
 * NOTE(review): a copy-loop failure also lands here, after
 * sg_alloc_table() succeeded; kfree(st) without sg_free_table() looks
 * like it leaks the scatterlist, and the copy failure is collapsed to
 * -ENOMEM — both match upstream, but worth confirming.
 */
121err_st:
122 kfree(st);
123err_pci:
124#ifdef __linux__
125 dma_free_coherent(obj->base.dev->dev,
126 roundup_pow_of_two(obj->base.size),
127 vaddr, dma);
128#else
129 drm_dmamem_free(i915->dmat, dmah);
130#endif
131 return -ENOMEM12;
132}
133
134void
135i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
136 struct sg_table *pages)
137{
138 dma_addr_t dma = sg_dma_address(pages->sgl)((pages->sgl)->dma_address);
Value stored to 'dma' during its initialization is never read
139#ifdef __linux__
140 void *vaddr = sg_page(pages->sgl);
141#else
142 struct drm_dmamem *dmah = (void *)sg_page(pages->sgl);
143 void *vaddr = dmah->kva;
144 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
145#endif
146
147 __i915_gem_object_release_shmem(obj, pages, false0);
148
149 if (obj->mm.dirty) {
150#ifdef __linux__
151 struct address_space *mapping = obj->base.filp->f_mapping;
152#endif
153 void *src = vaddr;
154 int i;
155
156 for (i = 0; i < obj->base.size / PAGE_SIZE(1 << 12); i++) {
157 struct vm_page *page;
158 char *dst;
159
160#ifdef __linux__
161 page = shmem_read_mapping_page(mapping, i);
162 if (IS_ERR(page))
163 continue;
164#else
165 struct pglist plist;
166 TAILQ_INIT(&plist)do { (&plist)->tqh_first = ((void *)0); (&plist)->
tqh_last = &(&plist)->tqh_first; } while (0)
;
167 if (uvm_obj_wire(obj->base.uao, i * PAGE_SIZE(1 << 12),
168 (i + 1) * PAGE_SIZE(1 << 12), &plist))
169 continue;
170 page = TAILQ_FIRST(&plist)((&plist)->tqh_first);
171#endif
172
173 dst = kmap_atomic(page);
174 drm_clflush_virt_range(src, PAGE_SIZE(1 << 12));
175 memcpy(dst, src, PAGE_SIZE)__builtin_memcpy((dst), (src), ((1 << 12)));
176 kunmap_atomic(dst);
177
178 set_page_dirty(page)x86_atomic_clearbits_u32(&page->pg_flags, 0x00000008);
179#ifdef __linux__
180 if (obj->mm.madv == I915_MADV_WILLNEED0)
181 mark_page_accessed(page);
182 put_page(page);
183#else
184 uvm_obj_unwire(obj->base.uao, i * PAGE_SIZE(1 << 12),
185 (i + 1) * PAGE_SIZE(1 << 12));
186#endif
187
188 src += PAGE_SIZE(1 << 12);
189 }
190 obj->mm.dirty = false0;
191 }
192
193 sg_free_table(pages);
194 kfree(pages);
195
196#ifdef __linux__
197 dma_free_coherent(obj->base.dev->dev,
198 roundup_pow_of_two(obj->base.size),
199 vaddr, dma);
200#else
201 drm_dmamem_free(i915->dmat, dmah);
202#endif
203}
204
205int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
206 const struct drm_i915_gem_pwrite *args)
207{
208#ifdef __linux__
209 void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
210#else
211 struct drm_dmamem *dmah = (void *)sg_page(obj->mm.pages->sgl);
212 void *vaddr = dmah->kva + args->offset;
213#endif
214 char __user *user_data = u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr));
215 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
216 int err;
217
218 err = i915_gem_object_wait(obj,
219 I915_WAIT_INTERRUPTIBLE(1UL << (0)) |
220 I915_WAIT_ALL(1UL << (2)),
221 MAX_SCHEDULE_TIMEOUT(0x7fffffff));
222 if (err)
223 return err;
224
225 /*
226 * We manually control the domain here and pretend that it
227 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
228 */
229 i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
230
231 if (copy_from_user(vaddr, user_data, args->size))
232 return -EFAULT14;
233
234 drm_clflush_virt_range(vaddr, args->size);
235 intel_gt_chipset_flush(to_gt(i915));
236
237 i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
238 return 0;
239}
240
241int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
242 const struct drm_i915_gem_pread *args)
243{
244#ifdef __linux__
245 void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
246#else
247 struct drm_dmamem *dmah = (void *)sg_page(obj->mm.pages->sgl);
248 void *vaddr = dmah->kva + args->offset;
249#endif
250 char __user *user_data = u64_to_user_ptr(args->data_ptr)((void *)(uintptr_t)(args->data_ptr));
251 int err;
252
253 err = i915_gem_object_wait(obj,
254 I915_WAIT_INTERRUPTIBLE(1UL << (0)),
255 MAX_SCHEDULE_TIMEOUT(0x7fffffff));
256 if (err)
257 return err;
258
259 drm_clflush_virt_range(vaddr, args->size);
260 if (copy_to_user(user_data, vaddr, args->size))
261 return -EFAULT14;
262
263 return 0;
264}
265
266static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
267{
268 struct sg_table *pages;
269 int err;
270
271 pages = __i915_gem_object_unset_pages(obj);
272
273 err = i915_gem_object_get_pages_phys(obj);
274 if (err)
275 goto err_xfer;
276
277 /* Perma-pin (until release) the physical set of pages */
278 __i915_gem_object_pin_pages(obj);
279
280 if (!IS_ERR_OR_NULL(pages))
281 i915_gem_object_put_pages_shmem(obj, pages);
282
283 i915_gem_object_release_memory_region(obj);
284 return 0;
285
286err_xfer:
287 if (!IS_ERR_OR_NULL(pages)) {
288 unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
289
290 __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
291 }
292 return err;
293}
294
295int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
296{
297 int err;
298
299 assert_object_held(obj)do { (void)(&((obj)->base.resv)->lock.base); } while
(0)
;
300
301 if (align > obj->base.size)
302 return -EINVAL22;
303
304 if (!i915_gem_object_is_shmem(obj))
305 return -EINVAL22;
306
307 if (!i915_gem_object_has_struct_page(obj))
308 return 0;
309
310 err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE(1UL << (0)));
311 if (err)
312 return err;
313
314 if (obj->mm.madv != I915_MADV_WILLNEED0)
315 return -EFAULT14;
316
317 if (i915_gem_object_has_tiling_quirk(obj))
318 return -EFAULT14;
319
320 if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
321 return -EBUSY16;
322
323 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)__builtin_expect(!!(obj->mm.madv != 0), 0)) {
324 drm_dbg(obj->base.dev,__drm_dev_dbg(((void *)0), (obj->base.dev) ? (obj->base
.dev)->dev : ((void *)0), DRM_UT_DRIVER, "Attempting to obtain a purgeable object\n"
)
325 "Attempting to obtain a purgeable object\n")__drm_dev_dbg(((void *)0), (obj->base.dev) ? (obj->base
.dev)->dev : ((void *)0), DRM_UT_DRIVER, "Attempting to obtain a purgeable object\n"
)
;
326 return -EFAULT14;
327 }
328
329 return i915_gem_object_shmem_to_phys(obj);
330}
331
332#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)0
333#include "selftests/i915_gem_phys.c"
334#endif