Bug Summary

File:     dev/pci/drm/i915/gem/i915_gem_stolen.c
Warning:  line 817, column 18
          Value stored to 'pdev' during its initialization is never read

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name i915_gem_stolen.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2008-2012 Intel Corporation
5 */
6
7#include <linux/errno.h>
8#include <linux/mutex.h>
9
10#include <drm/drm_mm.h>
11#include <drm/i915_drm.h>
12
13#include "gem/i915_gem_lmem.h"
14#include "gem/i915_gem_region.h"
15#include "gt/intel_gt.h"
16#include "gt/intel_gt_mcr.h"
17#include "gt/intel_gt_regs.h"
18#include "gt/intel_region_lmem.h"
19#include "i915_drv.h"
20#include "i915_gem_stolen.h"
21#include "i915_pci.h"
22#include "i915_reg.h"
23#include "i915_utils.h"
24#include "i915_vgpu.h"
25#include "intel_mchbar_regs.h"
26#include "intel_pci_config.h"
27
28/*
29 * The BIOS typically reserves some of the system's memory for the exclusive
30 * use of the integrated graphics. This memory is no longer available for
31 * use by the OS and so the user finds that his system has less memory
32 * available than he put in. We refer to this memory as stolen.
33 *
34 * The BIOS will allocate its framebuffer from the stolen memory. Our
35 * goal is try to reuse that object for our own fbcon which must always
36 * be available for panics. Anything else we can reuse the stolen memory
37 * for is a boon.
38 */
39
40int i915_gem_stolen_insert_node_in_range(struct drm_i915_privateinteldrm_softc *i915,
41 struct drm_mm_node *node, u64 size,
42 unsigned alignment, u64 start, u64 end)
43{
44 int ret;
45
46 if (!drm_mm_initialized(&i915->mm.stolen))
47 return -ENODEV19;
48
49 /* WaSkipStolenMemoryFirstPage:bdw+ */
50 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 8 && start < 4096)
51 start = 4096;
52
53 mutex_lock(&i915->mm.stolen_lock)rw_enter_write(&i915->mm.stolen_lock);
54 ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
55 size, alignment, 0,
56 start, end, DRM_MM_INSERT_BEST);
57 mutex_unlock(&i915->mm.stolen_lock)rw_exit_write(&i915->mm.stolen_lock);
58
59 return ret;
60}
61
62int i915_gem_stolen_insert_node(struct drm_i915_privateinteldrm_softc *i915,
63 struct drm_mm_node *node, u64 size,
64 unsigned alignment)
65{
66 return i915_gem_stolen_insert_node_in_range(i915, node,
67 size, alignment,
68 I915_GEM_STOLEN_BIAS(128 << 10),
69 U64_MAX0xffffffffffffffffULL);
70}
71
72void i915_gem_stolen_remove_node(struct drm_i915_privateinteldrm_softc *i915,
73 struct drm_mm_node *node)
74{
75 mutex_lock(&i915->mm.stolen_lock)rw_enter_write(&i915->mm.stolen_lock);
76 drm_mm_remove_node(node);
77 mutex_unlock(&i915->mm.stolen_lock)rw_exit_write(&i915->mm.stolen_lock);
78}
79
80static int i915_adjust_stolen(struct drm_i915_privateinteldrm_softc *i915,
81 struct resource *dsm)
82{
83 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
84 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
85#ifdef notyet
86 struct resource *r;
87#endif
88
89 if (dsm->start == 0 || dsm->end <= dsm->start)
90 return -EINVAL22;
91
92 /*
93 * TODO: We have yet to encounter the case where the GTT wasn't at the
94 * end of stolen. With that assumption we could simplify this.
95 */
96
97 /* Make sure we don't clobber the GTT if it's within stolen memory */
98 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) <= 4 &&
99 !IS_G33(i915)IS_PLATFORM(i915, INTEL_G33) && !IS_PINEVIEW(i915)IS_PLATFORM(i915, INTEL_PINEVIEW) && !IS_G4X(i915)(IS_PLATFORM(i915, INTEL_G45) || IS_PLATFORM(i915, INTEL_GM45
))
) {
100 struct resource stolen[2] = {*dsm, *dsm};
101 struct resource ggtt_res;
102 resource_size_t ggtt_start;
103
104 ggtt_start = intel_uncore_read(uncore, PGTBL_CTL((const i915_reg_t){ .reg = (0x02020) }));
105 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) == 4)
106 ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK0xfffff000) |
107 (ggtt_start & PGTBL_ADDRESS_HI_MASK0x000000f0) << 28;
108 else
109 ggtt_start &= PGTBL_ADDRESS_LO_MASK0xfffff000;
110
111 ggtt_res =
112 (struct resource) DEFINE_RES_MEM(ggtt_start,{ .start = (ggtt_start), .end = (ggtt_start) + (((ggtt)->vm
.total >> 12) * 4) - 1, }
113 ggtt_total_entries(ggtt) * 4){ .start = (ggtt_start), .end = (ggtt_start) + (((ggtt)->vm
.total >> 12) * 4) - 1, }
;
114
115 if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
116 stolen[0].end = ggtt_res.start;
117 if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
118 stolen[1].start = ggtt_res.end;
119
120 /* Pick the larger of the two chunks */
121 if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
122 *dsm = stolen[0];
123 else
124 *dsm = stolen[1];
125
126 if (stolen[0].start != stolen[1].start ||
127 stolen[0].end != stolen[1].end) {
128 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "GTT within stolen memory at %pR\n"
, &ggtt_res)
129 "GTT within stolen memory at %pR\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "GTT within stolen memory at %pR\n"
, &ggtt_res)
130 &ggtt_res)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "GTT within stolen memory at %pR\n"
, &ggtt_res)
;
131 drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Stolen memory adjusted to %pR\n"
, dsm)
132 dsm)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Stolen memory adjusted to %pR\n"
, dsm)
;
133 }
134 }
135
136#ifdef __linux__
137 /*
138 * With stolen lmem, we don't need to check if the address range
139 * overlaps with the non-stolen system memory range, since lmem is local
140 * to the gpu.
141 */
142 if (HAS_LMEM(i915)((&(i915)->__runtime)->memory_regions & ((1UL <<
(INTEL_REGION_LMEM_0))))
)
143 return 0;
144
145 /*
146 * Verify that nothing else uses this physical address. Stolen
147 * memory should be reserved by the BIOS and hidden from the
148 * kernel. So if the region is already marked as busy, something
149 * is seriously wrong.
150 */
151 r = devm_request_mem_region(i915->drm.dev, dsm->start,
152 resource_size(dsm),
153 "Graphics Stolen Memory");
154 if (r == NULL((void *)0)) {
155 /*
156 * One more attempt but this time requesting region from
157 * start + 1, as we have seen that this resolves the region
158 * conflict with the PCI Bus.
159 * This is a BIOS w/a: Some BIOS wrap stolen in the root
160 * PCI bus, but have an off-by-one error. Hence retry the
161 * reservation starting from 1 instead of 0.
162 * There's also BIOS with off-by-one on the other end.
163 */
164 r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
165 resource_size(dsm) - 2,
166 "Graphics Stolen Memory");
167 /*
168 * GEN3 firmware likes to smash pci bridges into the stolen
169 * range. Apparently this works.
170 */
171 if (!r && GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) != 3) {
172 drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "conflict detected with stolen region: %pR\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , dsm)
173 "conflict detected with stolen region: %pR\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "conflict detected with stolen region: %pR\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , dsm)
174 dsm)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "conflict detected with stolen region: %pR\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , dsm)
;
175
176 return -EBUSY16;
177 }
178 }
179#endif
180
181 return 0;
182}
183
184static void i915_gem_cleanup_stolen(struct drm_i915_privateinteldrm_softc *i915)
185{
186 if (!drm_mm_initialized(&i915->mm.stolen))
187 return;
188
189 drm_mm_takedown(&i915->mm.stolen);
190}
191
192static void g4x_get_stolen_reserved(struct drm_i915_privateinteldrm_softc *i915,
193 struct intel_uncore *uncore,
194 resource_size_t *base,
195 resource_size_t *size)
196{
197 u32 reg_val = intel_uncore_read(uncore,
198 IS_GM45(i915)IS_PLATFORM(i915, INTEL_GM45) ?
199 CTG_STOLEN_RESERVED((const i915_reg_t){ .reg = (0x10000 + 0x34) }) :
200 ELK_STOLEN_RESERVED((const i915_reg_t){ .reg = (0x10000 + 0x48) }));
201 resource_size_t stolen_top = i915->dsm.end + 1;
202
203 drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "%s_STOLEN_RESERVED = %08x\n"
, IS_PLATFORM(i915, INTEL_GM45) ? "CTG" : "ELK", reg_val)
204 IS_GM45(i915) ? "CTG" : "ELK", reg_val)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "%s_STOLEN_RESERVED = %08x\n"
, IS_PLATFORM(i915, INTEL_GM45) ? "CTG" : "ELK", reg_val)
;
205
206 if ((reg_val & G4X_STOLEN_RESERVED_ENABLE(1 << 0)) == 0)
207 return;
208
209 /*
210 * Whether ILK really reuses the ELK register for this is unclear.
211 * Let's see if we catch anyone with this supposedly enabled on ILK.
212 */
213 drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,({ int __ret = !!(((&(i915)->__runtime)->graphics.ip
.ver) == 5); if (__ret) printf("%s %s: " "ILK stolen reserved found? 0x%08x\n"
, dev_driver_string((&i915->drm)->dev), "", reg_val
); __builtin_expect(!!(__ret), 0); })
214 "ILK stolen reserved found? 0x%08x\n",({ int __ret = !!(((&(i915)->__runtime)->graphics.ip
.ver) == 5); if (__ret) printf("%s %s: " "ILK stolen reserved found? 0x%08x\n"
, dev_driver_string((&i915->drm)->dev), "", reg_val
); __builtin_expect(!!(__ret), 0); })
215 reg_val)({ int __ret = !!(((&(i915)->__runtime)->graphics.ip
.ver) == 5); if (__ret) printf("%s %s: " "ILK stolen reserved found? 0x%08x\n"
, dev_driver_string((&i915->drm)->dev), "", reg_val
); __builtin_expect(!!(__ret), 0); })
;
216
217 if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK(0xFFF << 4)))
218 return;
219
220 *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK(0xFFF << 4)) << 16;
221 drm_WARN_ON(&i915->drm,({ int __ret = !!(((reg_val & (0xFFFF << 16)) < *
base)); if (__ret) printf("%s %s: " "%s", dev_driver_string((
(&i915->drm))->dev), "", "drm_WARN_ON(" "(reg_val & (0xFFFF << 16)) < *base"
")"); __builtin_expect(!!(__ret), 0); })
222 (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base)({ int __ret = !!(((reg_val & (0xFFFF << 16)) < *
base)); if (__ret) printf("%s %s: " "%s", dev_driver_string((
(&i915->drm))->dev), "", "drm_WARN_ON(" "(reg_val & (0xFFFF << 16)) < *base"
")"); __builtin_expect(!!(__ret), 0); })
;
223
224 *size = stolen_top - *base;
225}
226
227static void gen6_get_stolen_reserved(struct drm_i915_privateinteldrm_softc *i915,
228 struct intel_uncore *uncore,
229 resource_size_t *base,
230 resource_size_t *size)
231{
232 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED((const i915_reg_t){ .reg = (0x1082C0) }));
233
234 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "GEN6_STOLEN_RESERVED = %08x\n"
, reg_val)
;
235
236 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE(1 << 0)))
237 return;
238
239 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK(0xFFF << 20);
240
241 switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK(3 << 4)) {
242 case GEN6_STOLEN_RESERVED_1M(0 << 4):
243 *size = 1024 * 1024;
244 break;
245 case GEN6_STOLEN_RESERVED_512K(1 << 4):
246 *size = 512 * 1024;
247 break;
248 case GEN6_STOLEN_RESERVED_256K(2 << 4):
249 *size = 256 * 1024;
250 break;
251 case GEN6_STOLEN_RESERVED_128K(3 << 4):
252 *size = 128 * 1024;
253 break;
254 default:
255 *size = 1024 * 1024;
256 MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n"
, "reg_val & (3 << 4)", (long)(reg_val & (3 <<
4))); __builtin_expect(!!(__ret), 0); })
;
257 }
258}
259
260static void vlv_get_stolen_reserved(struct drm_i915_privateinteldrm_softc *i915,
261 struct intel_uncore *uncore,
262 resource_size_t *base,
263 resource_size_t *size)
264{
265 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED((const i915_reg_t){ .reg = (0x1082C0) }));
266 resource_size_t stolen_top = i915->dsm.end + 1;
267
268 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "GEN6_STOLEN_RESERVED = %08x\n"
, reg_val)
;
269
270 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE(1 << 0)))
271 return;
272
273 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK(1 << 5)) {
274 default:
275 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n"
, "reg_val & (1 << 5)", (long)(reg_val & (1 <<
5))); __builtin_expect(!!(__ret), 0); })
;
276 fallthroughdo {} while (0);
277 case GEN7_STOLEN_RESERVED_1M(0 << 5):
278 *size = 1024 * 1024;
279 break;
280 }
281
282 /*
283 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
284 * reserved location as (top - size).
285 */
286 *base = stolen_top - *size;
287}
288
289static void gen7_get_stolen_reserved(struct drm_i915_privateinteldrm_softc *i915,
290 struct intel_uncore *uncore,
291 resource_size_t *base,
292 resource_size_t *size)
293{
294 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED((const i915_reg_t){ .reg = (0x1082C0) }));
295
296 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "GEN6_STOLEN_RESERVED = %08x\n"
, reg_val)
;
297
298 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE(1 << 0)))
299 return;
300
301 *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK(0x3FFF << 18);
302
303 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK(1 << 5)) {
304 case GEN7_STOLEN_RESERVED_1M(0 << 5):
305 *size = 1024 * 1024;
306 break;
307 case GEN7_STOLEN_RESERVED_256K(1 << 5):
308 *size = 256 * 1024;
309 break;
310 default:
311 *size = 1024 * 1024;
312 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n"
, "reg_val & (1 << 5)", (long)(reg_val & (1 <<
5))); __builtin_expect(!!(__ret), 0); })
;
313 }
314}
315
316static void chv_get_stolen_reserved(struct drm_i915_privateinteldrm_softc *i915,
317 struct intel_uncore *uncore,
318 resource_size_t *base,
319 resource_size_t *size)
320{
321 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED((const i915_reg_t){ .reg = (0x1082C0) }));
322
323 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "GEN6_STOLEN_RESERVED = %08x\n"
, reg_val)
;
324
325 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE(1 << 0)))
326 return;
327
328 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK(0xFFF << 20);
329
330 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK(3 << 7)) {
331 case GEN8_STOLEN_RESERVED_1M(0 << 7):
332 *size = 1024 * 1024;
333 break;
334 case GEN8_STOLEN_RESERVED_2M(1 << 7):
335 *size = 2 * 1024 * 1024;
336 break;
337 case GEN8_STOLEN_RESERVED_4M(2 << 7):
338 *size = 4 * 1024 * 1024;
339 break;
340 case GEN8_STOLEN_RESERVED_8M(3 << 7):
341 *size = 8 * 1024 * 1024;
342 break;
343 default:
344 *size = 8 * 1024 * 1024;
345 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n"
, "reg_val & (3 << 7)", (long)(reg_val & (3 <<
7))); __builtin_expect(!!(__ret), 0); })
;
346 }
347}
348
349static void bdw_get_stolen_reserved(struct drm_i915_privateinteldrm_softc *i915,
350 struct intel_uncore *uncore,
351 resource_size_t *base,
352 resource_size_t *size)
353{
354 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED((const i915_reg_t){ .reg = (0x1082C0) }));
355 resource_size_t stolen_top = i915->dsm.end + 1;
356
357 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "GEN6_STOLEN_RESERVED = %08x\n"
, reg_val)
;
358
359 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE(1 << 0)))
360 return;
361
362 if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK(0xFFF << 20)))
363 return;
364
365 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK(0xFFF << 20);
366 *size = stolen_top - *base;
367}
368
369static void icl_get_stolen_reserved(struct drm_i915_privateinteldrm_softc *i915,
370 struct intel_uncore *uncore,
371 resource_size_t *base,
372 resource_size_t *size)
373{
374 u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED((const i915_reg_t){ .reg = (0x1082C0) }));
375
376 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "GEN6_STOLEN_RESERVED = 0x%016llx\n"
, reg_val)
;
377
378 *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK(0xFFFFFFFFFFFULL << 20);
379
380 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK(3 << 7)) {
381 case GEN8_STOLEN_RESERVED_1M(0 << 7):
382 *size = 1024 * 1024;
383 break;
384 case GEN8_STOLEN_RESERVED_2M(1 << 7):
385 *size = 2 * 1024 * 1024;
386 break;
387 case GEN8_STOLEN_RESERVED_4M(2 << 7):
388 *size = 4 * 1024 * 1024;
389 break;
390 case GEN8_STOLEN_RESERVED_8M(3 << 7):
391 *size = 8 * 1024 * 1024;
392 break;
393 default:
394 *size = 8 * 1024 * 1024;
395 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n"
, "reg_val & (3 << 7)", (long)(reg_val & (3 <<
7))); __builtin_expect(!!(__ret), 0); })
;
396 }
397}
398
399static int i915_gem_init_stolen(struct intel_memory_region *mem)
400{
401 struct drm_i915_privateinteldrm_softc *i915 = mem->i915;
402 struct intel_uncore *uncore = &i915->uncore;
403 resource_size_t reserved_base, stolen_top;
404 resource_size_t reserved_total, reserved_size;
405
406 rw_init(&i915->mm.stolen_lock, "stln")_rw_init_flags(&i915->mm.stolen_lock, "stln", 0, ((void
*)0))
;
407
408 if (intel_vgpu_active(i915)) {
409 drm_notice(&i915->drm,printf("drm:pid%d:%s *NOTICE* " "[drm] " "%s, disabling use of stolen memory\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , "iGVT-g active"
)
410 "%s, disabling use of stolen memory\n",printf("drm:pid%d:%s *NOTICE* " "[drm] " "%s, disabling use of stolen memory\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , "iGVT-g active"
)
411 "iGVT-g active")printf("drm:pid%d:%s *NOTICE* " "[drm] " "%s, disabling use of stolen memory\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , "iGVT-g active"
)
;
412 return 0;
413 }
414
415 if (i915_vtd_active(i915) && GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) < 8) {
416 drm_notice(&i915->drm,printf("drm:pid%d:%s *NOTICE* " "[drm] " "%s, disabling use of stolen memory\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , "DMAR active"
)
417 "%s, disabling use of stolen memory\n",printf("drm:pid%d:%s *NOTICE* " "[drm] " "%s, disabling use of stolen memory\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , "DMAR active"
)
418 "DMAR active")printf("drm:pid%d:%s *NOTICE* " "[drm] " "%s, disabling use of stolen memory\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , "DMAR active"
)
;
419 return 0;
420 }
421
422 if (resource_size(&mem->region) == 0)
423 return 0;
424
425 i915->dsm = mem->region;
426
427 if (i915_adjust_stolen(i915, &i915->dsm))
428 return 0;
429
430 GEM_BUG_ON(i915->dsm.start == 0)((void)0);
431 GEM_BUG_ON(i915->dsm.end <= i915->dsm.start)((void)0);
432
433 stolen_top = i915->dsm.end + 1;
434 reserved_base = stolen_top;
435 reserved_size = 0;
436
437 if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 11) {
438 icl_get_stolen_reserved(i915, uncore,
439 &reserved_base, &reserved_size);
440 } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 8) {
441 if (IS_LP(i915)((&(i915)->__info)->is_lp))
442 chv_get_stolen_reserved(i915, uncore,
443 &reserved_base, &reserved_size);
444 else
445 bdw_get_stolen_reserved(i915, uncore,
446 &reserved_base, &reserved_size);
447 } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 7) {
448 if (IS_VALLEYVIEW(i915)IS_PLATFORM(i915, INTEL_VALLEYVIEW))
449 vlv_get_stolen_reserved(i915, uncore,
450 &reserved_base, &reserved_size);
451 else
452 gen7_get_stolen_reserved(i915, uncore,
453 &reserved_base, &reserved_size);
454 } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 6) {
455 gen6_get_stolen_reserved(i915, uncore,
456 &reserved_base, &reserved_size);
457 } else if (GRAPHICS_VER(i915)((&(i915)->__runtime)->graphics.ip.ver) >= 5 || IS_G4X(i915)(IS_PLATFORM(i915, INTEL_G45) || IS_PLATFORM(i915, INTEL_GM45
))
) {
458 g4x_get_stolen_reserved(i915, uncore,
459 &reserved_base, &reserved_size);
460 }
461
462 /*
463 * Our expectation is that the reserved space is at the top of the
464 * stolen region and *never* at the bottom. If we see !reserved_base,
465 * it likely means we failed to read the registers correctly.
466 */
467 if (!reserved_base) {
468 drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "inconsistent reservation %pa + %pa; ignoring\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , &reserved_base
, &reserved_size)
469 "inconsistent reservation %pa + %pa; ignoring\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "inconsistent reservation %pa + %pa; ignoring\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , &reserved_base
, &reserved_size)
470 &reserved_base, &reserved_size)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "inconsistent reservation %pa + %pa; ignoring\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , &reserved_base
, &reserved_size)
;
471 reserved_base = stolen_top;
472 reserved_size = 0;
473 }
474
475 i915->dsm_reserved =
476 (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size){ .start = (reserved_base), .end = (reserved_base) + (reserved_size
) - 1, }
;
477
478#ifdef notyet
479 if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
480 drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Stolen reserved area %pR outside stolen memory %pR\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , &i915
->dsm_reserved, &i915->dsm)
481 "Stolen reserved area %pR outside stolen memory %pR\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Stolen reserved area %pR outside stolen memory %pR\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , &i915
->dsm_reserved, &i915->dsm)
482 &i915->dsm_reserved, &i915->dsm)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Stolen reserved area %pR outside stolen memory %pR\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , &i915
->dsm_reserved, &i915->dsm)
;
483 return 0;
484 }
485#endif
486
487 /* Exclude the reserved region from driver use */
488 mem->region.end = reserved_base - 1;
489 mem->io_size = min(mem->io_size, resource_size(&mem->region))(((mem->io_size)<(resource_size(&mem->region)))?
(mem->io_size):(resource_size(&mem->region)))
;
490
491 /* It is possible for the reserved area to end before the end of stolen
492 * memory, so just consider the start. */
493 reserved_total = stolen_top - reserved_base;
494
495 i915->stolen_usable_size =
496 resource_size(&i915->dsm) - reserved_total;
497
498 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Memory reserved for graphics device: %lluK, usable: %lluK\n"
, (u64)resource_size(&i915->dsm) >> 10, (u64)i915
->stolen_usable_size >> 10)
499 "Memory reserved for graphics device: %lluK, usable: %lluK\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Memory reserved for graphics device: %lluK, usable: %lluK\n"
, (u64)resource_size(&i915->dsm) >> 10, (u64)i915
->stolen_usable_size >> 10)
500 (u64)resource_size(&i915->dsm) >> 10,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Memory reserved for graphics device: %lluK, usable: %lluK\n"
, (u64)resource_size(&i915->dsm) >> 10, (u64)i915
->stolen_usable_size >> 10)
501 (u64)i915->stolen_usable_size >> 10)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "Memory reserved for graphics device: %lluK, usable: %lluK\n"
, (u64)resource_size(&i915->dsm) >> 10, (u64)i915
->stolen_usable_size >> 10)
;
502
503 if (i915->stolen_usable_size == 0)
504 return 0;
505
506 /* Basic memrange allocator for stolen space. */
507 drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
508
509 return 0;
510}
511
512static void dbg_poison(struct i915_ggtt *ggtt,
513 dma_addr_t addr, resource_size_t size,
514 u8 x)
515{
516#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)0
517 if (!drm_mm_node_allocated(&ggtt->error_capture))
518 return;
519
520 if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND((int)(1UL << (10))))
521 return; /* beware stop_machine() inversion */
522
523 GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE))((void)0);
524
525 mutex_lock(&ggtt->error_mutex)rw_enter_write(&ggtt->error_mutex);
526 while (size) {
527 void __iomem *s;
528
529 ggtt->vm.insert_page(&ggtt->vm, addr,
530 ggtt->error_capture.start,
531 I915_CACHE_NONE, 0);
532 mb()do { __asm volatile("mfence" ::: "memory"); } while (0);
533
534 s = io_mapping_map_wc(&ggtt->iomap,
535 ggtt->error_capture.start,
536 PAGE_SIZE(1 << 12));
537 memset_io(s, x, PAGE_SIZE)__builtin_memset((s), (x), ((1 << 12)));
538 io_mapping_unmap(s);
539
540 addr += PAGE_SIZE(1 << 12);
541 size -= PAGE_SIZE(1 << 12);
542 }
543 mb()do { __asm volatile("mfence" ::: "memory"); } while (0);
544 ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE(1 << 12));
545 mutex_unlock(&ggtt->error_mutex)rw_exit_write(&ggtt->error_mutex);
546#endif
547}
548
549static struct sg_table *
550i915_pages_create_for_stolen(struct drm_device *dev,
551 resource_size_t offset, resource_size_t size)
552{
553 struct drm_i915_privateinteldrm_softc *i915 = to_i915(dev);
554 struct sg_table *st;
555 struct scatterlist *sg;
556
557 GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)))((void)0);
558
559 /* We hide that we have no struct vm_page backing our stolen object
560 * by wrapping the contiguous physical allocation with a fake
561 * dma mapping in a single scatterlist.
562 */
563
564 st = kmalloc(sizeof(*st), GFP_KERNEL(0x0001 | 0x0004));
565 if (st == NULL((void *)0))
566 return ERR_PTR(-ENOMEM12);
567
568 if (sg_alloc_table(st, 1, GFP_KERNEL(0x0001 | 0x0004))) {
569 kfree(st);
570 return ERR_PTR(-ENOMEM12);
571 }
572
573 sg = st->sgl;
574 sg->offset = 0;
575 sg->length = size;
576
577 sg_dma_address(sg)((sg)->dma_address) = (dma_addr_t)i915->dsm.start + offset;
578 sg_dma_len(sg)((sg)->length) = size;
579
580 return st;
581}
582
583static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
584{
585 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
586 struct sg_table *pages =
587 i915_pages_create_for_stolen(obj->base.dev,
588 obj->stolen->start,
589 obj->stolen->size);
590 if (IS_ERR(pages))
591 return PTR_ERR(pages);
592
593 dbg_poison(to_gt(i915)->ggtt,
594 sg_dma_address(pages->sgl)((pages->sgl)->dma_address),
595 sg_dma_len(pages->sgl)((pages->sgl)->length),
596 POISON_INUSE0xdb);
597
598 __i915_gem_object_set_pages(obj, pages, obj->stolen->size);
599
600 return 0;
601}
602
603static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
604 struct sg_table *pages)
605{
606 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
607 /* Should only be called from i915_gem_object_release_stolen() */
608
609 dbg_poison(to_gt(i915)->ggtt,
610 sg_dma_address(pages->sgl)((pages->sgl)->dma_address),
611 sg_dma_len(pages->sgl)((pages->sgl)->length),
612 POISON_FREE0xdf);
613
614 sg_free_table(pages);
615 kfree(pages);
616}
617
618static void
619i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
620{
621 struct drm_i915_privateinteldrm_softc *i915 = to_i915(obj->base.dev);
622 struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen)({ typeof(*&obj->stolen) __T = *(&obj->stolen);
*(&obj->stolen) = (typeof(*&obj->stolen))0; __T
; })
;
623
624 GEM_BUG_ON(!stolen)((void)0);
625 i915_gem_stolen_remove_node(i915, stolen);
626 kfree(stolen);
627
628 i915_gem_object_release_memory_region(obj);
629}
630
631static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
632 .name = "i915_gem_object_stolen",
633 .get_pages = i915_gem_object_get_pages_stolen,
634 .put_pages = i915_gem_object_put_pages_stolen,
635 .release = i915_gem_object_release_stolen,
636};
637
638static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
639 struct drm_i915_gem_object *obj,
640 struct drm_mm_node *stolen)
641{
642 static struct lock_class_key lock_class;
643 unsigned int cache_level;
644 unsigned int flags;
645 int err;
646
647 /*
648 * Stolen objects are always physically contiguous since we just
649 * allocate one big block underneath using the drm_mm range allocator.
650 */
651 flags = I915_BO_ALLOC_CONTIGUOUS(1UL << (0));
652
653 drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
654 i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);
655
656 obj->stolen = stolen;
657 obj->read_domains = I915_GEM_DOMAIN_CPU0x00000001 | I915_GEM_DOMAIN_GTT0x00000040;
658 cache_level = HAS_LLC(mem->i915)((&(mem->i915)->__info)->has_llc) ? I915_CACHE_LLC : I915_CACHE_NONE;
659 i915_gem_object_set_cache_coherency(obj, cache_level);
660
661 if (WARN_ON(!i915_gem_object_trylock(obj, NULL))({ int __ret = !!(!i915_gem_object_trylock(obj, ((void *)0)))
; if (__ret) printf("WARNING %s failed at %s:%d\n", "!i915_gem_object_trylock(obj, ((void *)0))"
, "/usr/src/sys/dev/pci/drm/i915/gem/i915_gem_stolen.c", 661)
; __builtin_expect(!!(__ret), 0); })
)
662 return -EBUSY16;
663
664 i915_gem_object_init_memory_region(obj, mem);
665
666 err = i915_gem_object_pin_pages(obj);
667 if (err)
668 i915_gem_object_release_memory_region(obj);
669 i915_gem_object_unlock(obj);
670
671 return err;
672}
673
674static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
675 struct drm_i915_gem_object *obj,
676 resource_size_t offset,
677 resource_size_t size,
678 resource_size_t page_size,
679 unsigned int flags)
680{
681 struct drm_i915_privateinteldrm_softc *i915 = mem->i915;
682 struct drm_mm_node *stolen;
683 int ret;
684
685 if (!drm_mm_initialized(&i915->mm.stolen))
686 return -ENODEV19;
687
688 if (size == 0)
689 return -EINVAL22;
690
691 /*
692 * With discrete devices, where we lack a mappable aperture there is no
693 * possible way to ever access this memory on the CPU side.
694 */
695 if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
696 !(flags & I915_BO_ALLOC_GPU_ONLY(1UL << (6))))
697 return -ENOSPC28;
698
699 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL(0x0001 | 0x0004));
700 if (!stolen)
701 return -ENOMEM12;
702
703 if (offset != I915_BO_INVALID_OFFSET((resource_size_t)-1)) {
704 drm_dbg(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n"
, &offset, &size)
705 "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n"
, &offset, &size)
706 &offset, &size)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->
drm)->dev : ((void *)0), DRM_UT_DRIVER, "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n"
, &offset, &size)
;
707
708 stolen->start = offset;
709 stolen->size = size;
710 mutex_lock(&i915->mm.stolen_lock)rw_enter_write(&i915->mm.stolen_lock);
711 ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
712 mutex_unlock(&i915->mm.stolen_lock)rw_exit_write(&i915->mm.stolen_lock);
713 } else {
714 ret = i915_gem_stolen_insert_node(i915, stolen, size,
715 mem->min_page_size);
716 }
717 if (ret)
718 goto err_free;
719
720 ret = __i915_gem_object_create_stolen(mem, obj, stolen);
721 if (ret)
722 goto err_remove;
723
724 return 0;
725
726err_remove:
727 i915_gem_stolen_remove_node(i915, stolen);
728err_free:
729 kfree(stolen);
730 return ret;
731}
732
733struct drm_i915_gem_object *
734i915_gem_object_create_stolen(struct drm_i915_privateinteldrm_softc *i915,
735 resource_size_t size)
736{
737 return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
738}
739
740static int init_stolen_smem(struct intel_memory_region *mem)
741{
742 /*
743 * Initialise stolen early so that we may reserve preallocated
744 * objects for the BIOS to KMS transition.
745 */
746 return i915_gem_init_stolen(mem);
747}
748
749static int release_stolen_smem(struct intel_memory_region *mem)
750{
751 i915_gem_cleanup_stolen(mem->i915);
752 return 0;
753}
754
755static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
756 .init = init_stolen_smem,
757 .release = release_stolen_smem,
758 .init_object = _i915_gem_object_stolen_init,
759};
760
761static int init_stolen_lmem(struct intel_memory_region *mem)
762{
763 int err;
764
765 if (GEM_WARN_ON(resource_size(&mem->region) == 0)({ __builtin_expect(!!(!!(resource_size(&mem->region) ==
0)), 0); })
)
766 return -ENODEV19;
767
768 /*
769 * TODO: For stolen lmem we mostly just care about populating the dsm
770 * related bits and setting up the drm_mm allocator for the range.
771 * Perhaps split up i915_gem_init_stolen() for this.
772 */
773 err = i915_gem_init_stolen(mem);
774 if (err)
775 return err;
776
777 STUB()do { printf("%s: stub\n", __func__); } while(0);
778 return -ENOSYS78;
779#ifdef notyet
780 if (mem->io_size && !io_mapping_init_wc(&mem->iomap,
781 mem->io_start,
782 mem->io_size)) {
783 err = -EIO5;
784 goto err_cleanup;
785 }
786
787 return 0;
788
789err_cleanup:
790 i915_gem_cleanup_stolen(mem->i915);
791 return err;
792#endif
793}
794
795static int release_stolen_lmem(struct intel_memory_region *mem)
796{
797 STUB()do { printf("%s: stub\n", __func__); } while(0);
798#ifdef notyet
799 if (mem->io_size)
800 io_mapping_fini(&mem->iomap);
801#endif
802 i915_gem_cleanup_stolen(mem->i915);
803 return 0;
804}
805
806static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
807 .init = init_stolen_lmem,
808 .release = release_stolen_lmem,
809 .init_object = _i915_gem_object_stolen_init,
810};
811
812struct intel_memory_region *
813i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
814 u16 instance)
815{
816 struct intel_uncore *uncore = &i915->uncore;
817 struct pci_dev *pdev = i915->drm.pdev;
Value stored to 'pdev' during its initialization is never read
818 resource_size_t dsm_size, dsm_base, lmem_size;
819 struct intel_memory_region *mem;
820 resource_size_t io_start, io_size;
821 resource_size_t min_page_size;
822 pcireg_t mtype;
823 bus_addr_t lmem_start;
824 bus_size_t lmem_len;
825 int ret;
826
827 if (WARN_ON_ONCE(instance))
828 return ERR_PTR(-ENODEV);
829
830#ifdef __linux__
831 if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
832 return ERR_PTR(-ENXIO);
833#else
834 mtype = pci_mapreg_type(i915->pc, i915->tag,
835 0x10 + (4 * GEN12_LMEM_BAR));
836 ret = pci_mapreg_info(i915->pc, i915->tag,
837 0x10 + (4 * GEN12_LMEM_BAR), mtype, &lmem_start, &lmem_len, NULL);
838 if (ret != 0)
839 return ERR_PTR(-ENXIO);
840#endif
841
842 /* Use DSM base address instead for stolen memory */
843 dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
844 if (IS_DG1(uncore->i915)) {
845#ifdef __linux__
846 lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
847#else
848 lmem_size = lmem_len;
849#endif
850 if (WARN_ON(lmem_size < dsm_base))
851 return ERR_PTR(-ENODEV);
852 } else {
853 resource_size_t lmem_range;
854
855 lmem_range = intel_gt_mcr_read_any(&i915->gt0, XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
856 lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
857 lmem_size *= SZ_1G;
858 }
859
860 dsm_size = lmem_size - dsm_base;
861#ifdef __linux__
862 if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
863 io_start = 0;
864 io_size = 0;
865 } else {
866 io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
867 io_size = dsm_size;
868 }
869#else
870 if (lmem_len < lmem_size) {
871 io_start = 0;
872 io_size = 0;
873 } else {
874 io_start = lmem_start + dsm_base;
875 io_size = dsm_size;
876 }
877#endif
878
879 min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
880 I915_GTT_PAGE_SIZE_4K;
881
882 mem = intel_memory_region_create(i915, dsm_base, dsm_size,
883 min_page_size,
884 io_start, io_size,
885 type, instance,
886 &i915_region_stolen_lmem_ops);
887 if (IS_ERR(mem))
888 return mem;
889
890 /*
891 * TODO: consider creating common helper to just print all the
892 * interesting stuff from intel_memory_region, which we can use for all
893 * our probed regions.
894 */
895
896 drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
897 &mem->io_start);
898 drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &dsm_base);
899
900 intel_memory_region_set_name(mem, "stolen-local");
901
902 mem->private = true;
903
904 return mem;
905}
906
907struct intel_memory_region*
908i915_gem_stolen_smem_setup(struct drm_i915_privateinteldrm_softc *i915, u16 type,
909 u16 instance)
910{
911 struct intel_memory_region *mem;
912
913 mem = intel_memory_region_create(i915,
914 intel_graphics_stolen_res.start,
915 resource_size(&intel_graphics_stolen_res),
916 PAGE_SIZE(1 << 12), 0, 0, type, instance,
917 &i915_region_stolen_smem_ops);
918 if (IS_ERR(mem))
919 return mem;
920
921 intel_memory_region_set_name(mem, "stolen-system");
922
923 mem->private = true1;
924 return mem;
925}
926
927bool_Bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
928{
929 return obj->ops == &i915_gem_object_stolen_ops;
930}