Bug Summary

File: dev/pci/drm/ttm/ttm_bo_util.c
Warning: line 189, column 22
Access to field 'pages' results in a dereference of a null pointer (loaded from variable 'ttm')
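
The path the analyzer reports (steps [1]-[31] in the listing below) runs through ttm_bo_move_memcpy(): 'ttm = bo->ttm' may legitimately be NULL ("TTM might be null for moves within the same region", step [22]), yet when old_iomap != NULL and new_iomap == NULL the copy loop still calls ttm_copy_io_ttm_page(ttm, ...), which dereferences ttm->pages[page] without a NULL check. The following reduced sketch (hypothetical names, not the kernel source) compiles on its own and reproduces the shape of the flaw:

/* repro.c -- reduced illustration of the reported path; hypothetical,
 * not the kernel code. Build with: cc -c repro.c */
struct vm_page;
struct ttm_tt { struct vm_page **pages; };

int copy_io_ttm_page(struct ttm_tt *ttm, unsigned long page)
{
	struct vm_page *d = ttm->pages[page]; /* NULL deref when ttm == NULL */
	return d ? 0 : -1;
}

int move_memcpy(struct ttm_tt *ttm, void *old_iomap, void *new_iomap)
{
	/* Mirrors the kernel loop: ttm is checked only before populating
	 * it, not before the iomem -> TTM copy below. */
	if (old_iomap != 0 && new_iomap == 0)
		return copy_io_ttm_page(ttm, 0); /* ttm may still be NULL here */
	return 0;
}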

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name ttm_bo_util.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc 
-fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/ttm/ttm_bo_util.c
1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2/**************************************************************************
3 *
4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28/*
29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
30 */
31
32#include <drm/ttm/ttm_bo_driver.h>
33#include <drm/ttm/ttm_placement.h>
34#include <drm/drm_vma_manager.h>
35#include <linux/io.h>
36#include <linux/highmem.h>
37#include <linux/wait.h>
38#include <linux/slab.h>
39#include <linux/vmalloc.h>
40#include <linux/module.h>
41#include <linux/dma-resv.h>
42
43struct ttm_transfer_obj {
44 struct ttm_buffer_object base;
45 struct ttm_buffer_object *bo;
46};
47
48void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
49{
50 ttm_resource_free(bo, &bo->mem);
51}
52
53int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
54 struct ttm_operation_ctx *ctx,
55 struct ttm_resource *new_mem)
56{
57 struct ttm_tt *ttm = bo->ttm;
58 struct ttm_resource *old_mem = &bo->mem;
59 int ret;
60
61 if (old_mem->mem_type != TTM_PL_SYSTEM) {
62 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
63
64 if (unlikely(ret != 0)) {
65 if (ret != -ERESTARTSYS)
66 pr_err("Failed to expire sync object before unbinding TTM\n");
67 return ret;
68 }
69
70 ttm_bo_tt_unbind(bo);
71 ttm_bo_free_old_node(bo);
72 old_mem->mem_type = TTM_PL_SYSTEM;
73 }
74
75 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
76 if (unlikely(ret != 0))
77 return ret;
78
79 if (new_mem->mem_type != TTM_PL_SYSTEM) {
80
81 ret = ttm_tt_populate(bo->bdev, ttm, ctx);
82 if (unlikely(ret != 0))
83 return ret;
84
85 ret = ttm_bo_tt_bind(bo, new_mem);
86 if (unlikely(ret != 0))
87 return ret;
88 }
89
90 ttm_bo_assign_mem(bo, new_mem);
91 return 0;
92}
93EXPORT_SYMBOL(ttm_bo_move_ttm);
94
95int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
96 struct ttm_resource *mem)
97{
98 if (mem->bus.offset || mem->bus.addr)
[13] Assuming field 'offset' is not equal to 0
99 return 0;
[14] Returning without writing to 'mem->bus.is_iomem', which participates in a condition later
[15] Returning zero, which participates in a condition later
100
101 mem->bus.is_iomem = false;
102 if (!bdev->driver->io_mem_reserve)
103 return 0;
104
105 return bdev->driver->io_mem_reserve(bdev, mem);
106}
107
108void ttm_mem_io_free(struct ttm_bo_device *bdev,
109 struct ttm_resource *mem)
110{
111 if (!mem->bus.offset && !mem->bus.addr)
112 return;
113
114 if (bdev->driver->io_mem_free)
115 bdev->driver->io_mem_free(bdev, mem);
116
117 mem->bus.offset = 0;
118 mem->bus.addr = NULL;
119}
120
121static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
122 struct ttm_resource *mem,
123 void **virtual)
124{
125 int ret;
126 void *addr;
127 int flags;
128
129 *virtual = NULL;
130 ret = ttm_mem_io_reserve(bdev, mem);
[12] Calling 'ttm_mem_io_reserve'
[16] Returning from 'ttm_mem_io_reserve'
131 if (ret || !mem->bus.is_iomem)
[4.1] 'ret' is 0
[16.1] 'ret' is 0
[5] Assuming field 'is_iomem' is true
[6] Taking false branch
[17] Assuming field 'is_iomem' is false
[18] Taking true branch
132 return ret;
[19] Returning zero (loaded from 'ret'), which participates in a condition later
133
134 if (mem->bus.addr) {
[6.1] Field 'addr' is non-null
[7] Taking true branch
135 addr = mem->bus.addr;
136 } else {
137 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
138
139 if (mem->placement & TTM_PL_FLAG_WC)
140 flags = BUS_SPACE_MAP_PREFETCHABLE;
141 else
142 flags = 0;
143
144 if (bus_space_map(bdev->memt, mem->bus.offset,
145 bus_size, BUS_SPACE_MAP_LINEAR | flags,
146 &mem->bus.bsh)) {
147 printf("%s bus_space_map failed\n", __func__);
148 return -ENOMEM;
149 }
150
151 addr = bus_space_vaddr(bdev->memt, mem->bus.bsh);
152
153 if (!addr) {
154 ttm_mem_io_free(bdev, mem);
155 return -ENOMEM;
156 }
157 }
158 *virtual = addr;
159 return 0;
[8] Returning zero, which participates in a condition later
160}
161
162static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
163 struct ttm_resource *mem,
164 void *virtual)
165{
166 if (virtual && mem->bus.addr == NULL)
167 bus_space_unmap(bdev->memt, mem->bus.bsh,
168 (size_t)mem->num_pages << PAGE_SHIFT);
169 ttm_mem_io_free(bdev, mem);
170}
171
172static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
173{
174 uint32_t *dstP =
175 (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
176 uint32_t *srcP =
177 (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
178
179 int i;
180 for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
181 iowrite32(ioread32(srcP++), dstP++);
182 return 0;
183}
184
185static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
186 unsigned long page,
187 pgprot_t prot)
188{
189 struct vm_page *d = ttm->pages[page];
[31] Access to field 'pages' results in a dereference of a null pointer (loaded from variable 'ttm')
190 void *dst;
191
192 if (!d)
193 return -ENOMEM;
194
195 src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
196 dst = kmap_atomic_prot(d, prot);
197 if (!dst)
198 return -ENOMEM;
199
200 memcpy_fromio(dst, src, PAGE_SIZE);
201
202 kunmap_atomic(dst);
203
204 return 0;
205}
206
207static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
208 unsigned long page,
209 pgprot_t prot)
210{
211 struct vm_page *s = ttm->pages[page];
212 void *src;
213
214 if (!s)
215 return -ENOMEM;
216
217 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
218 src = kmap_atomic_prot(s, prot);
219 if (!src)
220 return -ENOMEM;
221
222 memcpy_toio(dst, src, PAGE_SIZE);
223
224 kunmap_atomic(src);
225
226 return 0;
227}
228
229int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
230 struct ttm_operation_ctx *ctx,
231 struct ttm_resource *new_mem)
232{
233 struct ttm_bo_device *bdev = bo->bdev;
234 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
235 struct ttm_tt *ttm = bo->ttm;
[1] 'ttm' initialized here
236 struct ttm_resource *old_mem = &bo->mem;
237 struct ttm_resource old_copy = *old_mem;
238 void *old_iomap;
239 void *new_iomap;
240 int ret;
241 unsigned long i;
242 unsigned long page;
243 unsigned long add = 0;
244 int dir;
245
246 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
247 if (ret)
[2] Assuming 'ret' is 0
[3] Taking false branch
248 return ret;
249
250 ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
[4] Calling 'ttm_resource_ioremap'
[9] Returning from 'ttm_resource_ioremap'
251 if (ret)
[9.1] 'ret' is 0
[10] Taking false branch
252 return ret;
253 ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
[11] Calling 'ttm_resource_ioremap'
[20] Returning from 'ttm_resource_ioremap'
254 if (ret)
[20.1] 'ret' is 0
[21] Taking false branch
255 goto out;
256
257 /*
258 * Single TTM move. NOP.
259 */
260 if (old_iomap == NULL && new_iomap == NULL)
[21.1] 'old_iomap' is not equal to NULL
261 goto out2;
262
263 /*
264 * Don't move nonexistent data. Clear destination instead.
265 */
266 if (old_iomap == NULL &&
[21.2] 'old_iomap' is not equal to NULL
267 (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
268 !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
269 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
270 goto out2;
271 }
272
273 /*
274 * TTM might be null for moves within the same region.
275 */
276 if (ttm) {
[22] Assuming 'ttm' is null
[23] Taking false branch
277 ret = ttm_tt_populate(bdev, ttm, ctx);
278 if (ret)
279 goto out1;
280 }
281
282 add = 0;
283 dir = 1;
284
285 if ((old_mem->mem_type == new_mem->mem_type) &&
[24] Assuming 'old_mem->mem_type' is not equal to 'new_mem->mem_type'
286 (new_mem->start < old_mem->start + old_mem->size)) {
287 dir = -1;
288 add = new_mem->num_pages - 1;
289 }
290
291 for (i = 0; i < new_mem->num_pages; ++i) {
[25] Assuming 'i' is < field 'num_pages'
[26] Loop condition is true. Entering loop body
292 page = i * dir + add;
293 if (old_iomap == NULL) {
[26.1] 'old_iomap' is not equal to NULL
[27] Taking false branch
294 pgprot_t prot = ttm_io_prot(old_mem->placement,
295 PAGE_KERNEL);
296 ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
297 prot);
298 } else if (new_iomap == NULL) {
[27.1] 'new_iomap' is equal to NULL
[28] Taking true branch
299 pgprot_t prot = ttm_io_prot(new_mem->placement,
300 PAGE_KERNEL);
301 ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
302 prot);
[29] Passing null pointer value via 1st parameter 'ttm'
[30] Calling 'ttm_copy_io_ttm_page'
303 } else {
304 ret = ttm_copy_io_page(new_iomap, old_iomap, page);
305 }
306 if (ret)
307 goto out1;
308 }
309 mb();
310out2:
311 old_copy = *old_mem;
312
313 ttm_bo_assign_mem(bo, new_mem);
314
315 if (!man->use_tt)
316 ttm_bo_tt_destroy(bo);
317
318out1:
319 ttm_resource_iounmap(bdev, old_mem, new_iomap);
320out:
321 ttm_resource_iounmap(bdev, &old_copy, old_iomap);
322
323 /*
324 * On error, keep the mm node!
325 */
326 if (!ret)
327 ttm_resource_free(bo, &old_copy);
328 return ret;
329}
330EXPORT_SYMBOL(ttm_bo_move_memcpy);
331
332static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
333{
334 struct ttm_transfer_obj *fbo;
335
336 fbo = container_of(bo, struct ttm_transfer_obj, base);
337 ttm_bo_put(fbo->bo);
338 kfree(fbo);
339}
340
341/**
342 * ttm_buffer_object_transfer
343 *
344 * @bo: A pointer to a struct ttm_buffer_object.
345 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
346 * holding the data of @bo with the old placement.
347 *
348 * This is a utility function that may be called after an accelerated move
349 * has been scheduled. A new buffer object is created as a placeholder for
350 * the old data while it's being copied. When that buffer object is idle,
351 * it can be destroyed, releasing the space of the old placement.
352 * Returns:
353 * !0: Failure.
354 */
355
356static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
357 struct ttm_buffer_object **new_obj)
358{
359 struct ttm_transfer_obj *fbo;
360 int ret;
361
362 fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
363 if (!fbo)
364 return -ENOMEM;
365
366 fbo->base = *bo;
367 fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
368
369 ttm_bo_get(bo);
370 fbo->bo = bo;
371
372 /**
373 * Fix up members that we shouldn't copy directly:
374 * TODO: Explicit member copy would probably be better here.
375 */
376
377 atomic_inc(&ttm_bo_glob.bo_count);
378 INIT_LIST_HEAD(&fbo->base.ddestroy);
379 INIT_LIST_HEAD(&fbo->base.lru);
380 INIT_LIST_HEAD(&fbo->base.swap);
381 fbo->base.moving = NULL;
382 drm_vma_node_reset(&fbo->base.base.vma_node);
383
384 kref_init(&fbo->base.kref);
385 fbo->base.destroy = &ttm_transfered_destroy;
386 fbo->base.acc_size = 0;
387 if (bo->type != ttm_bo_type_sg)
388 fbo->base.base.resv = &fbo->base.base._resv;
389
390 dma_resv_init(&fbo->base.base._resv);
391 fbo->base.base.dev = NULL;
392 ret = dma_resv_trylock(&fbo->base.base._resv);
393 WARN_ON(!ret);
394
395 *new_obj = &fbo->base;
396 return 0;
397}
398
399#ifdef __linux__
400pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
401{
402 /* Cached mappings need no adjustment */
403 if (caching_flags & TTM_PL_FLAG_CACHED)
404 return tmp;
405
406#if defined(__i386__) || defined(__x86_64__)
407 if (caching_flags & TTM_PL_FLAG_WC)
408 tmp = pgprot_writecombine(tmp);
409 else if (boot_cpu_data.x86 > 3)
410 tmp = pgprot_noncached(tmp);
411#endif
412#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
413 defined(__powerpc__) || defined(__mips__)
414 if (caching_flags & TTM_PL_FLAG_WC)
415 tmp = pgprot_writecombine(tmp);
416 else
417 tmp = pgprot_noncached(tmp);
418#endif
419#if defined(__sparc__)
420 tmp = pgprot_noncached(tmp);
421#endif
422 return tmp;
423}
424EXPORT_SYMBOL(ttm_io_prot);
425#endif
426
427pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
428{
429 /* Cached mappings need no adjustment */
430 if (caching_flags & TTM_PL_FLAG_CACHED)
431 return tmp;
432
433 if (caching_flags & TTM_PL_FLAG_WC)
434 tmp = pgprot_writecombine(tmp);
435 else
436 tmp = pgprot_noncached(tmp);
437
438 return tmp;
439}
440
441static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
442 unsigned long offset,
443 unsigned long size,
444 struct ttm_bo_kmap_obj *map)
445{
446 int flags;
447 struct ttm_resource *mem = &bo->mem;
448
449 if (bo->mem.bus.addr) {
450 map->bo_kmap_type = ttm_bo_map_premapped;
451 map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
452 } else {
453 map->bo_kmap_type = ttm_bo_map_iomap;
454 if (mem->placement & TTM_PL_FLAG_WC)
455 flags = BUS_SPACE_MAP_PREFETCHABLE;
456 else
457 flags = 0;
458 if (bus_space_map(bo->bdev->memt,
459 bo->mem.bus.offset + offset,
460 size, BUS_SPACE_MAP_LINEAR | flags,
461 &bo->mem.bus.bsh)) {
462 printf("%s bus_space_map failed\n", __func__);
463 map->virtual = 0;
464 } else
465 map->virtual = bus_space_vaddr(bo->bdev->memt,
466 bo->mem.bus.bsh);
467 }
468 return (!map->virtual) ? -ENOMEM : 0;
469}
470
471static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
472 unsigned long start_page,
473 unsigned long num_pages,
474 struct ttm_bo_kmap_obj *map)
475{
476 struct ttm_resource *mem = &bo->mem;
477 struct ttm_operation_ctx ctx = {
478 .interruptible = false,
479 .no_wait_gpu = false
480 };
481 struct ttm_tt *ttm = bo->ttm;
482 pgprot_t prot;
483 int ret;
484
485 BUG_ON(!ttm);
486
487 ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
488 if (ret)
489 return ret;
490
491 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
492 /*
493 * We're mapping a single page, and the desired
494 * page protection is consistent with the bo.
495 */
496
497 map->bo_kmap_type = ttm_bo_map_kmap;
498 map->page = ttm->pages[start_page];
499 map->virtual = kmap(map->page);
500 } else {
501 /*
502 * We need to use vmap to get the desired page protection
503 * or to make the buffer object look contiguous.
504 */
505 prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
506 map->bo_kmap_type = ttm_bo_map_vmap;
507 map->virtual = vmap(ttm->pages + start_page, num_pages,
508 0, prot);
509 }
510 return (!map->virtual) ? -ENOMEM : 0;
511}
512
513int ttm_bo_kmap(struct ttm_buffer_object *bo,
514 unsigned long start_page, unsigned long num_pages,
515 struct ttm_bo_kmap_obj *map)
516{
517 unsigned long offset, size;
518 int ret;
519
520 map->virtual = NULL;
521 map->bo = bo;
522 if (num_pages > bo->num_pages)
523 return -EINVAL;
524 if (start_page > bo->num_pages)
525 return -EINVAL;
526
527 ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
528 if (ret)
529 return ret;
530 if (!bo->mem.bus.is_iomem) {
531 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
532 } else {
533 offset = start_page << PAGE_SHIFT;
534 size = num_pages << PAGE_SHIFT;
535 return ttm_bo_ioremap(bo, offset, size, map);
536 }
537}
538EXPORT_SYMBOL(ttm_bo_kmap);
539
540void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
541{
542 if (!map->virtual)
543 return;
544 switch (map->bo_kmap_type) {
545 case ttm_bo_map_iomap:
546 bus_space_unmap(map->bo->bdev->memt, map->bo->mem.bus.bsh,
547 (size_t)map->bo->mem.num_pages << PAGE_SHIFT);
548 break;
549 case ttm_bo_map_vmap:
550 vunmap(map->virtual,
551 (size_t)map->bo->mem.num_pages << PAGE_SHIFT);
552 break;
553 case ttm_bo_map_kmap:
554 kunmap_va(map->virtual);
555 break;
556 case ttm_bo_map_premapped:
557 break;
558 default:
559 BUG();
560 }
561 ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
562 map->virtual = NULL;
563 map->page = NULL;
564}
565EXPORT_SYMBOL(ttm_bo_kunmap);
566
567static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
568 bool dst_use_tt)
569{
570 int ret;
571 ret = ttm_bo_wait(bo, false, false);
572 if (ret)
573 return ret;
574
575 if (!dst_use_tt)
576 ttm_bo_tt_destroy(bo);
577 ttm_bo_free_old_node(bo);
578 return 0;
579}
580
581static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
582 struct dma_fence *fence,
583 bool dst_use_tt)
584{
585 struct ttm_buffer_object *ghost_obj;
586 int ret;
587
588 /**
589 * This should help pipeline ordinary buffer moves.
590 *
591 * Hang old buffer memory on a new buffer object,
592 * and leave it to be released when the GPU
593 * operation has completed.
594 */
595
596 dma_fence_put(bo->moving);
597 bo->moving = dma_fence_get(fence);
598
599 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
600 if (ret)
601 return ret;
602
603 dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
604
605 /**
606 * If we're not moving to fixed memory, the TTM object
607 * needs to stay alive. Otherwise hang it on the ghost
608 * bo to be unbound and destroyed.
609 */
610
611 if (dst_use_tt)
612 ghost_obj->ttm = NULL;
613 else
614 bo->ttm = NULL;
615
616 dma_resv_unlock(&ghost_obj->base._resv);
617 ttm_bo_put(ghost_obj);
618 return 0;
619}
620
621static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
622 struct dma_fence *fence)
623{
624 struct ttm_bo_device *bdev = bo->bdev;
625 struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
626
627 /**
628 * BO doesn't have a TTM we need to bind/unbind. Just remember
629 * this eviction and free up the allocation
630 */
631 spin_lock(&from->move_lock);
632 if (!from->move || dma_fence_is_later(fence, from->move)) {
633 dma_fence_put(from->move);
634 from->move = dma_fence_get(fence);
635 }
636 spin_unlock(&from->move_lock);
637
638 ttm_bo_free_old_node(bo);
639
640 dma_fence_put(bo->moving);
641 bo->moving = dma_fence_get(fence);
642}
643
644int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
645 struct dma_fence *fence,
646 bool evict,
647 bool pipeline,
648 struct ttm_resource *new_mem)
649{
650 struct ttm_bo_device *bdev = bo->bdev;
651 struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
652 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
653 int ret = 0;
654
655 dma_resv_add_excl_fence(bo->base.resv, fence);
656 if (!evict)
657 ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
658 else if (!from->use_tt && pipeline)
659 ttm_bo_move_pipeline_evict(bo, fence);
660 else
661 ret = ttm_bo_wait_free_node(bo, man->use_tt);
662
663 if (ret)
664 return ret;
665
666 ttm_bo_assign_mem(bo, new_mem);
667
668 return 0;
669}
670EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
671
672int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
673{
674 struct ttm_buffer_object *ghost;
675 int ret;
676
677 ret = ttm_buffer_object_transfer(bo, &ghost);
678 if (ret)
679 return ret;
680
681 ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
682 /* Last resort, wait for the BO to be idle when we are OOM */
683 if (ret)
684 ttm_bo_wait(bo, false, false);
685
686 memset(&bo->mem, 0, sizeof(bo->mem));
687 bo->mem.mem_type = TTM_PL_SYSTEM;
688 bo->ttm = NULL;
689
690 dma_resv_unlock(&ghost->base._resv);
691 ttm_bo_put(ghost);
692
693 return 0;
694}
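
Possible Hardening (sketch)

The report only proves that nothing on this path rules out ttm == NULL; whether the combination (old_iomap != NULL, new_iomap == NULL, ttm == NULL) is reachable in practice is not established here. If it is, one way to close the hole -- offered as a sketch of the branch at lines 298-302, not as the fix adopted upstream -- is to bail out before the iomem-to-TTM copy, mirroring the "Don't move nonexistent data" test at line 266 that already tolerates a NULL ttm in the opposite direction:

	} else if (new_iomap == NULL) {
		pgprot_t prot;

		/* Hypothetical guard: without a TTM there are no
		 * destination pages, so the copy cannot proceed. */
		if (ttm == NULL) {
			ret = -EINVAL;
			goto out1;
		}
		prot = ttm_io_prot(new_mem->placement, PAGE_KERNEL);
		ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, prot);
	}

An alternative would be to extend the early test at lines 263-271 so that a move with no usable destination pages is rejected before the copy loop is entered; which behavior is correct depends on how callers use moves within the same region, which this report alone does not settle.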