| File: | dev/pci/drm/i915/intel_memory_region.c |
| Warning: | line 320, column 31: Value stored to 'mem' during its initialization is never read |
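The finding is a dead store: `mem` is initialized to `ERR_PTR(-ENODEV)` at line 320, but every `switch` arm below either assigns `mem` before the `IS_ERR(mem)` check is reached or skips that check entirely with `continue`, so the initial value is never read. A minimal standalone sketch of the same pattern, with simplified, hypothetical names (compile with `clang --analyze` to reproduce this class of warning):

    #include <stdio.h>

    /* 'p' mirrors 'mem' at line 320: it is initialized, then overwritten on
     * every path that later reads it, so the initializer is a dead store. */
    static const char *region_name(int type)
    {
            const char *p = "unknown";      /* flagged: value never read */

            switch (type) {
            case 0:
                    p = "system";
                    break;
            case 1:
                    p = "local";
                    break;
            default:
                    return NULL;            /* initializer unused here too */
            }
            return p;                       /* every arm reaching this wrote p */
    }

    int main(void)
    {
            printf("%s\n", region_name(1));
            return 0;
    }

The conventional fix, assuming no future `switch` arm is meant to fall through to the `IS_ERR()` check unassigned, is to drop the initializer and declare plain `struct intel_memory_region *mem;`, letting the compiler's uninitialized-use diagnostics stand guard instead of a dead store.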
| 1 | // SPDX-License-Identifier: MIT |
| 2 | /* |
| 3 | * Copyright © 2019 Intel Corporation |
| 4 | */ |
| 5 | |
| 6 | #include <linux/prandom.h> |
| 7 | |
| 8 | #include <uapi/drm/i915_drm.h> |
| 9 | |
| 10 | #include "intel_memory_region.h" |
| 11 | #include "i915_drv.h" |
| 12 | #include "i915_ttm_buddy_manager.h" |
| 13 | |
| 14 | static const struct { |
| 15 | u16 class; |
| 16 | u16 instance; |
| 17 | } intel_region_map[] = { |
| 18 | [INTEL_REGION_SMEM] = { |
| 19 | .class = INTEL_MEMORY_SYSTEM, |
| 20 | .instance = 0, |
| 21 | }, |
| 22 | [INTEL_REGION_LMEM_0] = { |
| 23 | .class = INTEL_MEMORY_LOCAL, |
| 24 | .instance = 0, |
| 25 | }, |
| 26 | [INTEL_REGION_STOLEN_SMEM] = { |
| 27 | .class = INTEL_MEMORY_STOLEN_SYSTEM, |
| 28 | .instance = 0, |
| 29 | }, |
| 30 | [INTEL_REGION_STOLEN_LMEM] = { |
| 31 | .class = INTEL_MEMORY_STOLEN_LOCAL, |
| 32 | .instance = 0, |
| 33 | }, |
| 34 | }; |
| 35 | |
| 36 | static int __iopagetest(struct intel_memory_region *mem, |
| 37 | u8 __iomem *va, int pagesize, |
| 38 | u8 value, resource_size_t offset, |
| 39 | const void *caller) |
| 40 | { |
| 41 | int byte = prandom_u32_max(pagesize); |
| 42 | u8 result[3]; |
| 43 | |
| 44 | memset_io(va, value, pagesize); /* or GPF! */ |
| 45 | wmb(); |
| 46 | |
| 47 | result[0] = ioread8(va); |
| 48 | result[1] = ioread8(va + byte); |
| 49 | result[2] = ioread8(va + pagesize - 1); |
| 50 | if (memchr_inv(result, value, sizeof(result))) { |
| 51 | dev_err(mem->i915->drm.dev, |
| 52 | "Failed to read back from memory region:%pR at [%pa + %pa] for %ps; wrote %x, read (%x, %x, %x)\n", |
| 53 | &mem->region, &mem->io_start, &offset, caller, |
| 54 | value, result[0], result[1], result[2]); |
| 55 | return -EINVAL; |
| 56 | } |
| 57 | |
| 58 | return 0; |
| 59 | } |
| 60 | |
| 61 | static int iopagetest(struct intel_memory_region *mem, |
| 62 | resource_size_t offset, |
| 63 | const void *caller) |
| 64 | { |
| 65 | STUB(); |
| 66 | return -ENOSYS; |
| 67 | #ifdef notyet |
| 68 | const u8 val[] = { 0x0, 0xa5, 0xc3, 0xf0 }; |
| 69 | void __iomem *va; |
| 70 | int err; |
| 71 | int i; |
| 72 | |
| 73 | va = ioremap_wc(mem->io_start + offset, PAGE_SIZE); |
| 74 | if (!va) { |
| 75 | dev_err(mem->i915->drm.dev, |
| 76 | "Failed to ioremap memory region [%pa + %pa] for %ps\n", |
| 77 | &mem->io_start, &offset, caller); |
| 78 | return -EFAULT; |
| 79 | } |
| 80 | |
| 81 | for (i = 0; i < ARRAY_SIZE(val); i++) { |
| 82 | err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller); |
| 83 | if (err) |
| 84 | break; |
| 85 | |
| 86 | err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller); |
| 87 | if (err) |
| 88 | break; |
| 89 | } |
| 90 | |
| 91 | iounmap(va); |
| 92 | return err; |
| 93 | #endif |
| 94 | } |
| 95 | |
| 96 | static resource_size_t random_page(resource_size_t last) |
| 97 | { |
| 98 | /* Limited to low 44b (16TiB), but should suffice for a spot check */ |
| 99 | return prandom_u32_max(last >> PAGE_SHIFT) << PAGE_SHIFT; |
| 100 | } |
| 101 | |
| 102 | static int iomemtest(struct intel_memory_region *mem, |
| 103 | bool test_all, |
| 104 | const void *caller) |
| 105 | { |
| 106 | resource_size_t last, page; |
| 107 | int err; |
| 108 | |
| 109 | if (mem->io_size < PAGE_SIZE) |
| 110 | return 0; |
| 111 | |
| 112 | last = mem->io_size - PAGE_SIZE; |
| 113 | |
| 114 | /* |
| 115 | * Quick test to check read/write access to the iomap (backing store). |
| 116 | * |
| 117 | * Write a byte, read it back. If the iomapping fails, we expect |
| 118 | * a GPF preventing further execution. If the backing store does not |
| 119 | * exist, the read back will return garbage. We check a couple of pages, |
| 120 | * the first and last of the specified region to confirm the backing |
| 121 | * store + iomap does cover the entire memory region; and we check |
| 122 | * a random offset within as a quick spot check for bad memory. |
| 123 | */ |
| 124 | |
| 125 | if (test_all) { |
| 126 | for (page = 0; page <= last; page += PAGE_SIZE) { |
| 127 | err = iopagetest(mem, page, caller); |
| 128 | if (err) |
| 129 | return err; |
| 130 | } |
| 131 | } else { |
| 132 | err = iopagetest(mem, 0, caller); |
| 133 | if (err) |
| 134 | return err; |
| 135 | |
| 136 | err = iopagetest(mem, last, caller); |
| 137 | if (err) |
| 138 | return err; |
| 139 | |
| 140 | err = iopagetest(mem, random_page(last), caller); |
| 141 | if (err) |
| 142 | return err; |
| 143 | } |
| 144 | |
| 145 | return 0; |
| 146 | } |
| 147 | |
| 148 | struct intel_memory_region * |
| 149 | intel_memory_region_lookup(struct drm_i915_private *i915, |
| 150 | u16 class, u16 instance) |
| 151 | { |
| 152 | struct intel_memory_region *mr; |
| 153 | int id; |
| 154 | |
| 155 | /* XXX: consider maybe converting to an rb tree at some point */ |
| 156 | for_each_memory_region(mr, i915, id) { |
| 157 | if (mr->type == class && mr->instance == instance) |
| 158 | return mr; |
| 159 | } |
| 160 | |
| 161 | return NULL; |
| 162 | } |
| 163 | |
| 164 | struct intel_memory_region * |
| 165 | intel_memory_region_by_type(struct drm_i915_private *i915, |
| 166 | enum intel_memory_type mem_type) |
| 167 | { |
| 168 | struct intel_memory_region *mr; |
| 169 | int id; |
| 170 | |
| 171 | for_each_memory_region(mr, i915, id) |
| 172 | if (mr->type == mem_type) |
| 173 | return mr; |
| 174 | |
| 175 | return NULL; |
| 176 | } |
| 177 | |
| 178 | /** |
| 179 | * intel_memory_region_reserve - Reserve a memory range |
| 180 | * @mem: The region for which we want to reserve a range. |
| 181 | * @offset: Start of the range to reserve. |
| 182 | * @size: The size of the range to reserve. |
| 183 | * |
| 184 | * Return: 0 on success, negative error code on failure. |
| 185 | */ |
| 186 | int intel_memory_region_reserve(struct intel_memory_region *mem, |
| 187 | resource_size_t offset, |
| 188 | resource_size_t size) |
| 189 | { |
| 190 | struct ttm_resource_manager *man = mem->region_private; |
| 191 | |
| 192 | GEM_BUG_ON(mem->is_range_manager); |
| 193 | |
| 194 | return i915_ttm_buddy_man_reserve(man, offset, size); |
| 195 | } |
| 196 | |
| 197 | void intel_memory_region_debug(struct intel_memory_region *mr, |
| 198 | struct drm_printer *printer) |
| 199 | { |
| 200 | drm_printf(printer, "%s: ", mr->name); |
| 201 | |
| 202 | if (mr->region_private) |
| 203 | ttm_resource_manager_debug(mr->region_private, printer); |
| 204 | else |
| 205 | drm_printf(printer, "total:%pa bytes\n", &mr->total); |
| 206 | } |
| 207 | |
| 208 | static int intel_memory_region_memtest(struct intel_memory_region *mem, |
| 209 | void *caller) |
| 210 | { |
| 211 | struct drm_i915_private *i915 = mem->i915; |
| 212 | int err = 0; |
| 213 | |
| 214 | if (!mem->io_start) |
| 215 | return 0; |
| 216 | |
| 217 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) || i915->params.memtest) |
| 218 | err = iomemtest(mem, i915->params.memtest, caller); |
| 219 | |
| 220 | return err; |
| 221 | } |
| 222 | |
| 223 | struct intel_memory_region * |
| 224 | intel_memory_region_create(struct drm_i915_private *i915, |
| 225 | resource_size_t start, |
| 226 | resource_size_t size, |
| 227 | resource_size_t min_page_size, |
| 228 | resource_size_t io_start, |
| 229 | resource_size_t io_size, |
| 230 | u16 type, |
| 231 | u16 instance, |
| 232 | const struct intel_memory_region_ops *ops) |
| 233 | { |
| 234 | struct intel_memory_region *mem; |
| 235 | int err; |
| 236 | |
| 237 | mem = kzalloc(sizeof(*mem), GFP_KERNEL); |
| 238 | if (!mem) |
| 239 | return ERR_PTR(-ENOMEM); |
| 240 | |
| 241 | mem->i915 = i915; |
| 242 | mem->region = (struct resource)DEFINE_RES_MEM(start, size); |
| 243 | mem->io_start = io_start; |
| 244 | mem->io_size = io_size; |
| 245 | mem->min_page_size = min_page_size; |
| 246 | mem->ops = ops; |
| 247 | mem->total = size; |
| 248 | mem->type = type; |
| 249 | mem->instance = instance; |
| 250 | |
| 251 | rw_init(&mem->objects.lock, "memobj"); |
| 252 | INIT_LIST_HEAD(&mem->objects.list); |
| 253 | |
| 254 | if (ops->init) { |
| 255 | err = ops->init(mem); |
| 256 | if (err) |
| 257 | goto err_free; |
| 258 | } |
| 259 | |
| 260 | err = intel_memory_region_memtest(mem, (void *)_RET_IP_); |
| 261 | if (err) |
| 262 | goto err_release; |
| 263 | |
| 264 | return mem; |
| 265 | |
| 266 | err_release: |
| 267 | if (mem->ops->release) |
| 268 | mem->ops->release(mem); |
| 269 | err_free: |
| 270 | kfree(mem); |
| 271 | return ERR_PTR(err); |
| 272 | } |
| 273 | |
| 274 | void intel_memory_region_set_name(struct intel_memory_region *mem, |
| 275 | const char *fmt, ...) |
| 276 | { |
| 277 | va_list ap; |
| 278 | |
| 279 | va_start(ap, fmt); |
| 280 | vsnprintf(mem->name, sizeof(mem->name), fmt, ap); |
| 281 | va_end(ap); |
| 282 | } |
| 283 | |
| 284 | void intel_memory_region_avail(struct intel_memory_region *mr, |
| 285 | u64 *avail, u64 *visible_avail) |
| 286 | { |
| 287 | if (mr->type == INTEL_MEMORY_LOCAL) { |
| 288 | i915_ttm_buddy_man_avail(mr->region_private, |
| 289 | avail, visible_avail); |
| 290 | *avail <<= PAGE_SHIFT; |
| 291 | *visible_avail <<= PAGE_SHIFT; |
| 292 | } else { |
| 293 | *avail = mr->total; |
| 294 | *visible_avail = mr->total; |
| 295 | } |
| 296 | } |
| 297 | |
| 298 | void intel_memory_region_destroy(struct intel_memory_region *mem) |
| 299 | { |
| 300 | int ret = 0; |
| 301 | |
| 302 | if (mem->ops->release) |
| 303 | ret = mem->ops->release(mem); |
| 304 | |
| 305 | #ifdef notyet |
| 306 | GEM_WARN_ON(!list_empty_careful(&mem->objects.list)); |
| 307 | #endif |
| 308 | mutex_destroy(&mem->objects.lock); |
| 309 | if (!ret) |
| 310 | kfree(mem); |
| 311 | } |
| 312 | |
| 313 | /* Global memory region registration -- only slight layer inversions! */ |
| 314 | |
| 315 | int intel_memory_regions_hw_probe(struct drm_i915_private *i915) |
| 316 | { |
| 317 | int err, i; |
| 318 | |
| 319 | for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) { |
| 320 | struct intel_memory_region *mem = ERR_PTR(-ENODEV); |
Value stored to 'mem' during its initialization is never read | |
| 321 | u16 type, instance; |
| 322 | |
| 323 | if (!HAS_REGION(i915, BIT(i))) |
| 324 | continue; |
| 325 | |
| 326 | type = intel_region_map[i].class; |
| 327 | instance = intel_region_map[i].instance; |
| 328 | switch (type) { |
| 329 | case INTEL_MEMORY_SYSTEM: |
| 330 | if (IS_DGFX(i915)) |
| 331 | mem = i915_gem_ttm_system_setup(i915, type, |
| 332 | instance); |
| 333 | else |
| 334 | mem = i915_gem_shmem_setup(i915, type, |
| 335 | instance); |
| 336 | break; |
| 337 | case INTEL_MEMORY_STOLEN_LOCAL: |
| 338 | mem = i915_gem_stolen_lmem_setup(i915, type, instance); |
| 339 | if (!IS_ERR(mem)) |
| 340 | i915->mm.stolen_region = mem; |
| 341 | break; |
| 342 | case INTEL_MEMORY_STOLEN_SYSTEM: |
| 343 | mem = i915_gem_stolen_smem_setup(i915, type, instance); |
| 344 | if (!IS_ERR(mem)) |
| 345 | i915->mm.stolen_region = mem; |
| 346 | break; |
| 347 | default: |
| 348 | continue; |
| 349 | } |
| 350 | |
| 351 | if (IS_ERR(mem)) { |
| 352 | err = PTR_ERR(mem); |
| 353 | drm_err(&i915->drm, |
| 354 | "Failed to setup region(%d) type=%d\n", |
| 355 | err, type); |
| 356 | goto out_cleanup; |
| 357 | } |
| 358 | |
| 359 | mem->id = i; |
| 360 | i915->mm.regions[i] = mem; |
| 361 | } |
| 362 | |
| 363 | return 0; |
| 364 | |
| 365 | out_cleanup: |
| 366 | intel_memory_regions_driver_release(i915); |
| 367 | return err; |
| 368 | } |
| 369 | |
| 370 | void intel_memory_regions_driver_release(struct drm_i915_private *i915) |
| 371 | { |
| 372 | int i; |
| 373 | |
| 374 | for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) { |
| 375 | struct intel_memory_region *region = |
| 376 | fetch_and_zero(&i915->mm.regions[i]); |
| 377 | |
| 378 | if (region) |
| 379 | intel_memory_region_destroy(region); |
| 380 | } |
| 381 | } |
| 382 | |
| 383 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
| 384 | #include "selftests/intel_memory_region.c" |
| 385 | #include "selftests/mock_region.c" |
| 386 | #endif |
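A note on the return convention visible throughout the listing: `intel_memory_region_create()` (line 224) hands back either a valid region pointer or an errno encoded via `ERR_PTR()`, never NULL, and `intel_memory_regions_hw_probe()` decodes failures with `IS_ERR()`/`PTR_ERR()`. A caller sketch of the same convention; `my_region_ops` and the geometry constants are hypothetical, not taken from this file:

    /* Hypothetical caller; the ops table and sizes are illustrative only. */
    struct intel_memory_region *mem;

    mem = intel_memory_region_create(i915, 0 /* start */, SZ_4G /* size */,
                                     PAGE_SIZE /* min_page_size */,
                                     io_start, SZ_256M /* io_size */,
                                     INTEL_MEMORY_LOCAL, 0 /* instance */,
                                     &my_region_ops);
    if (IS_ERR(mem))
            return PTR_ERR(mem);    /* e.g. -ENOMEM from the kzalloc() above */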