File: dev/pci/drm/include/linux/atomic.h
Warning: line 291, column 42: The left operand of '&' is a garbage value
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>

#include <dev/pci/agpvar.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

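/*
 * Reserve a scratch drm_mm_node in the mappable part of the GGTT.  The
 * pread/pwrite fallback paths below use a single-page node like this (via
 * i915_gem_gtt_prepare()) when the whole object cannot be pinned into the
 * aperture, mapping one page at a time through it instead.
 */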
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->vm.mutex);
	if (err)
		return err;

	memset(node, 0, sizeof(*node));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					  size, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);

	mutex_unlock(&ggtt->vm.mutex);

	return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(node);
	mutex_unlock(&ggtt->vm.mutex);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	if (mutex_lock_interruptible(&ggtt->vm.mutex))
		return -EINTR;

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

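/*
 * Unbind every vma of @obj from its address space.  Each vma is moved onto
 * a local still_in_list so that obj->vma.lock can be dropped while the
 * actual unbind runs, and the list is spliced back once the walk completes.
 * Returns 0 on success, typically -EBUSY when a pinned or active vma blocks
 * the unbind, or -EAGAIN when the vm is being torn down concurrently (with
 * UNBIND_BARRIER the -EAGAIN case is retried internally after rcu_barrier()).
 */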
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
	bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
	DRM_LIST_HEAD(still_in_list);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	int ret;

	assert_object_held(obj);

	if (list_empty(&obj->vma.list))
		return 0;

	/*
	 * As some machines use ACPI to handle runtime-resume callbacks, and
	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
	 * as they are required by the shrinker. Ergo, we wake the device up
	 * first just in case.
	 */
	wakeref = intel_runtime_pm_get(rpm);

try_again:
	ret = 0;
	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
			continue;

		if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
			ret = -EBUSY;
			break;
		}

		/*
		 * Requiring the vm destructor to take the object lock
		 * before destroying a vma would help us eliminate the
		 * i915_vm_tryget() here, AND thus also the barrier stuff
		 * at the end. That's an easy fix, but sleeping locks in
		 * a kthread should generally be avoided.
		 */
		ret = -EAGAIN;
		if (!i915_vm_tryget(vma->vm))
			break;

		spin_unlock(&obj->vma.lock);

		/*
		 * Since i915_vma_parked() takes the object lock
		 * before vma destruction, it won't race us here,
		 * and destroy the vma from under us.
		 */

		ret = -EBUSY;
		if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
			assert_object_held(vma->obj);
			ret = i915_vma_unbind_async(vma, vm_trylock);
		}

		if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
				      !i915_vma_is_active(vma))) {
			if (vm_trylock) {
				if (mutex_trylock(&vma->vm->mutex)) {
					ret = __i915_vma_unbind(vma);
					mutex_unlock(&vma->vm->mutex);
				}
			} else {
				ret = i915_vma_unbind(vma);
			}
		}

		i915_vm_put(vma->vm);
		spin_lock(&obj->vma.lock);
	}
	list_splice_init(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
		rcu_barrier(); /* flush the i915_vm_release() */
		goto try_again;
	}

	intel_runtime_pm_put(rpm, wakeref);

	return ret;
}

static int
shmem_pread(struct vm_page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap_va(vaddr);

	return ret ? -EFAULT : 0;
}

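/*
 * Read via the CPU: walk the backing shmem pages one at a time, kmap()ing
 * each and copying out with shmem_pread().  The object lock is only held
 * long enough to pin the pages and flush the required domains; the copy
 * loop itself runs unlocked.
 */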
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct vm_page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

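/*
 * Two implementations of the aperture read helper: the Linux version maps
 * the page through io_mapping, while the OpenBSD version goes through
 * bus_space via the agp handle.  Both first try the cheap atomic mapping
 * and fall back to a sleeping mapping if the user copy faults.
 */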
#ifdef __linux__
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}
#else
static inline bool
gtt_user_read(struct drm_i915_private *dev_priv,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	bus_space_handle_t bsh;
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	agp_map_atomic(dev_priv->agph, base, &bsh);
	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	agp_unmap_atomic(dev_priv->agph, bsh);
	if (unwritten) {
		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
	}
	return unwritten;
}
#endif

static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
					     struct drm_mm_node *node,
					     bool write)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int ret;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	vma = ERR_PTR(-ENODEV);
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_ww;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_ww;

	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
						  PIN_MAPPABLE |
						  PIN_NONBLOCK /* NOWARN */ |
						  PIN_NOEVICT);
	if (vma == ERR_PTR(-EDEADLK)) {
		ret = -EDEADLK;
		goto err_ww;
	} else if (!IS_ERR(vma)) {
		node->start = i915_ggtt_offset(vma);
		node->flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
		if (ret)
			goto err_ww;
		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = NULL;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret) {
		if (drm_mm_node_allocated(node)) {
			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
			remove_mappable_node(ggtt, node);
		} else {
			i915_vma_unpin(vma);
		}
	}

err_ww:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return ret ? ERR_PTR(ret) : vma;
}

static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
				 struct drm_mm_node *node,
				 struct i915_vma *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	i915_gem_object_unpin_pages(obj);
	if (drm_mm_node_allocated(node)) {
		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
		remove_mappable_node(ggtt, node);
	} else {
		i915_vma_unpin(vma);
	}
}

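/*
 * Slow-path read through the GGTT aperture, used by the pread ioctl when
 * the shmem path fails with -EFAULT or -ENODEV (for instance when the
 * object has no struct pages).  If the whole object could not be pinned
 * in the mappable aperture, each page is inserted into the scratch node,
 * copied out, and then replaced by the next one.
 */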
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	vma = i915_gem_gtt_prepare(obj, &node, false);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
		} else {
			page_base += offset & LINUX_PAGE_MASK;
		}

		if (gtt_user_read(i915, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PREAD is disallowed for all platforms after TGL-LP. This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);
	ret = -ENODEV;
	if (obj->ops->pread)
		ret = obj->ops->pread(obj, args);
	if (ret != -ENODEV)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
#ifdef __linux__
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}
#else
static inline bool
ggtt_write(struct drm_i915_private *dev_priv,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	bus_space_handle_t bsh;
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	agp_map_atomic(dev_priv->agph, base, &bsh);
	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	agp_unmap_atomic(dev_priv->agph, bsh);
	if (unwritten) {
		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
	}

	return unwritten;
}
#endif

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret = 0;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fall back, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref)
			return -EFAULT;
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_gtt_prepare(obj, &node, true);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & LINUX_PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(i915, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct vm_page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap_va(vaddr);

	return ret ? -EFAULT : 0;
}

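/*
 * CPU write path, the mirror of i915_gem_shmem_pread(): pages are copied
 * in one at a time with shmem_pwrite(), clflushing before the copy for
 * any partially overwritten cachelines and after it when the object's
 * domain requires it (CLFLUSH_AFTER).
 */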
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = curcpu()->ci_cflushsz - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct vm_page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PWRITE is disallowed for all platforms after TGL-LP. This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    i915_gem_cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(obj, args);
	}

err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &to_gt(i915)->ggtt->userfault_list, userfault_link)
		__i915_gem_object_release_mmap_gtt(obj);

	list_for_each_entry_safe(obj, on,
				 &i915->runtime_pm.lmem_userfault_list, userfault_link)
		i915_gem_object_runtime_pm_release_mmap_offset(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
		struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

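/*
 * Detach a vma from its object's vma rbtree so that a subsequent
 * i915_vma_instance() lookup creates a fresh vma instead of reusing this
 * (pinned or active, hence unusable) one; see the new_vma retry loop in
 * i915_gem_object_ggtt_pin_ww() below.
 */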
static void discard_ggtt_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	spin_lock(&obj->vma.lock);
	if (!RB_EMPTY_NODE(&vma->obj_node)) {
		rb_erase(&vma->obj_node, &obj->vma.tree);
		RB_CLEAR_NODE(&vma->obj_node);
	}
	spin_unlock(&obj->vma.lock);
}

struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_gtt_view *view,
			    u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	int ret;

	GEM_WARN_ON(!ww);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GTT_VIEW_NORMAL)) {
		/*
		 * If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > ggtt->mappable_end)
			return ERR_PTR(-E2BIG);

		/*
		 * If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > ggtt->mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

new_vma:
	vma = i915_vma_instance(obj, &ggtt->vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			/*
			 * If this misplaced vma is too big (i.e., at least
			 * half the size of aperture) or hasn't been pinned
			 * mappable before, we ignore the misplacement when
			 * PIN_NONBLOCK is set in order to avoid the ping-pong
			 * issue described above. In other words, we try to
			 * avoid the costly operation of unbinding this vma
			 * from the GGTT and rebinding it back because there
			 * may not be enough space for this vma in the aperture.
			 */
			if (flags & PIN_MAPPABLE &&
			    (vma->fence_size > ggtt->mappable_end / 2 ||
			     !i915_vma_is_map_and_fenceable(vma)))
				return ERR_PTR(-ENOSPC);
		}

		if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
			discard_ggtt_vma(vma);
			goto new_vma;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);

	if (ret)
		return ERR_PTR(ret);

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		mutex_lock(&ggtt->vm.mutex);
		i915_vma_revoke_fence(vma);
		mutex_unlock(&ggtt->vm.mutex);
	}

	ret = i915_vma_wait_for_bind(vma);
	if (ret) {
		i915_vma_unpin(vma);
		return ERR_PTR(ret);
	}

	return vma;
}

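/*
 * Convenience wrapper around i915_gem_object_ggtt_pin_ww() that supplies
 * its own ww transaction: for_i915_gem_ww() retries the lock/pin sequence
 * whenever it backs off with -EDEADLK.
 */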
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_gtt_view *view,
			 u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	struct i915_vma *ret;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
						  alignment, flags);
		if (IS_ERR(ret))
			err = PTR_ERR(ret);
	}

	return err ? ERR_PTR(err) : ret;
}

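/*
 * Handle I915_GEM_MADVISE: userspace marks a buffer WILLNEED or DONTNEED,
 * which moves it between the shrink and purge lists.  A DONTNEED object
 * without pages has its backing storage truncated immediately, and
 * args->retained reports whether the backing pages had not already been
 * purged.
 */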
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_clear_tiling_quirk(obj);
			i915_gem_object_make_shrinkable(obj);
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_make_unshrinkable(obj);
			i915_gem_object_set_tiling_quirk(obj);
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED) {
		obj->mm.madv = args->madv;
		if (obj->ops->adjust_lru)
			obj->ops->adjust_lru(obj);
	}

	if (i915_gem_object_has_pages(obj) ||
	    i915_gem_object_has_self_managed_shrink_list(obj)) {
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		if (!list_empty(&obj->mm.link)) {
			struct list_head *list;

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);
		}
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;

	i915_gem_object_unlock(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

| 1150 | /* | ||||||
| 1151 | * A single pass should suffice to release all the freed objects (along most | ||||||
| 1152 | * call paths), but be a little more paranoid in that freeing the objects does | ||||||
| 1153 | * take a little amount of time, during which the rcu callbacks could have added | ||||||
| 1154 | * new objects into the freed list, and armed the work again. | ||||||
| 1155 | */ | ||||||
| 1156 | void i915_gem_drain_freed_objects(struct drm_i915_privateinteldrm_softc *i915) | ||||||
| 1157 | { | ||||||
| 1158 | while (atomic_read(&i915->mm.free_count)({ typeof(*(&i915->mm.free_count)) __tmp = *(volatile typeof (*(&i915->mm.free_count)) *)&(*(&i915->mm.free_count )); membar_datadep_consumer(); __tmp; })) { | ||||||
| 1159 | flush_work(&i915->mm.free_work); | ||||||
| 1160 | flush_delayed_work(&i915->bdev.wq); | ||||||
| 1161 | rcu_barrier()__asm volatile("" : : : "memory"); | ||||||
| 1162 | } | ||||||
| 1163 | } | ||||||
| 1164 | |||||||
| 1165 | /*
| 1166 | * Similar to the objects above (see i915_gem_drain_freed_objects()), in
| 1167 | * general we have workers that are armed by RCU and then rearm themselves
| 1168 | * in their callbacks. To be paranoid, we need to drain the workqueue a
| 1169 | * second time after waiting for the RCU grace period so that we catch work
| 1170 | * queued via RCU from the first pass. As neither drain_workqueue() nor
| 1171 | * flush_workqueue() report a result, we assume that no more than 3 passes
| 1172 | * are required to catch all _recursive_ RCU delayed work.
| 1173 | */
| 1174 | void i915_gem_drain_workqueue(struct drm_i915_private *i915)
| 1175 | {
| 1176 | int i;
| 1177 |
| 1178 | for (i = 0; i < 3; i++) {
| 1179 | flush_workqueue(i915->wq);
| 1180 | rcu_barrier();
| 1181 | i915_gem_drain_freed_objects(i915);
| 1182 | }
| 1183 |
| 1184 | drain_workqueue(i915->wq);
| 1185 | }
| 1186 | |||||||
| 1187 | int i915_gem_init(struct drm_i915_private *dev_priv)
| 1188 | {
| 1189 | struct intel_gt *gt;
| 1190 | unsigned int i;
| 1191 | int ret;
| 1192 |
| 1193 | /* We need to fall back to 4K pages if the host doesn't support a huge GTT. */
| 1194 | if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
| 1195 | RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
| 1196 |
| 1197 | ret = i915_gem_init_userptr(dev_priv);
| 1198 | if (ret)
| 1199 | return ret;
| 1200 |
| 1201 | intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
| 1202 | intel_wopcm_init(&dev_priv->wopcm);
| 1203 |
| 1204 | ret = i915_init_ggtt(dev_priv);
| 1205 | if (ret) {
| 1206 | GEM_BUG_ON(ret == -EIO);
| 1207 | goto err_unlock;
| 1208 | }
| 1209 | |||||||
| 1210 | /*
| 1211 | * Despite its name, intel_init_clock_gating applies display clock gating
| 1212 | * workarounds, GT mmio workarounds and the occasional GT power context
| 1213 | * workaround. Worse, sometimes it includes a context register workaround
| 1214 | * which we need to apply before we record the default HW state for all
| 1215 | * contexts.
| 1216 | *
| 1217 | * FIXME: break up the workarounds and apply them at the right time!
| 1218 | */
| 1219 | intel_init_clock_gating(dev_priv);
| 1220 |
| 1221 | for_each_gt(gt, dev_priv, i) {
| 1222 | ret = intel_gt_init(gt);
| 1223 | if (ret)
| 1224 | goto err_unlock;
| 1225 | }
| 1226 | |||||||
| 1227 | return 0; | ||||||
| 1228 | |||||||
| 1229 | /*
| 1230 | * Unwinding is complicated by the fact that we want to handle -EIO to
| 1231 | * mean disable GPU submission but keep KMS alive. We want to mark the
| 1232 | * HW as irreversibly wedged, but keep enough state around that the
| 1233 | * driver doesn't explode during runtime.
| 1234 | */
| 1235 | err_unlock: | ||||||
| 1236 | i915_gem_drain_workqueue(dev_priv); | ||||||
| 1237 | |||||||
| 1238 | if (ret != -EIO) {
| 1239 | for_each_gt(gt, dev_priv, i) {
| 1240 | intel_gt_driver_remove(gt);
| 1241 | intel_gt_driver_release(gt);
| 1242 | intel_uc_cleanup_firmwares(&gt->uc);
| 1243 | }
| 1244 | }
| 1245 | |||||||
| 1246 | if (ret == -EIO) {
| 1247 | /*
| 1248 | * Allow engine or uC initialisation to fail by marking the GPU
| 1249 | * as wedged. But we only want to do this when the GPU is angry;
| 1250 | * for any other failure, such as an allocation failure, we bail.
| 1251 | */
| 1252 | for_each_gt(gt, dev_priv, i) {
| 1253 | if (!intel_gt_is_wedged(gt)) {
| 1254 | i915_probe_error(dev_priv,
| 1255 | "Failed to initialize GPU, declaring it wedged!\n");
| 1256 | intel_gt_set_wedged(gt);
| 1257 | }
| 1258 | }
| 1259 | |||||||
| 1260 | /* Minimal basic recovery for KMS */ | ||||||
| 1261 | ret = i915_ggtt_enable_hw(dev_priv); | ||||||
| 1262 | i915_ggtt_resume(to_gt(dev_priv)->ggtt); | ||||||
| 1263 | intel_init_clock_gating(dev_priv); | ||||||
| 1264 | } | ||||||
| 1265 | |||||||
| 1266 | i915_gem_drain_freed_objects(dev_priv); | ||||||
| 1267 | |||||||
| 1268 | return ret; | ||||||
| 1269 | } | ||||||
| 1270 | |||||||
| 1271 | void i915_gem_driver_register(struct drm_i915_private *i915)
| 1272 | {
| 1273 | i915_gem_driver_register__shrinker(i915);
| 1274 |
| 1275 | intel_engines_driver_register(i915);
| 1276 | }
| 1277 |
| 1278 | void i915_gem_driver_unregister(struct drm_i915_private *i915)
| 1279 | {
| 1280 | i915_gem_driver_unregister__shrinker(i915);
| 1281 | }
| 1282 |
| 1283 | void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
| 1284 | {
| 1285 | struct intel_gt *gt;
| 1286 | unsigned int i;
| 1287 |
| 1288 | i915_gem_suspend_late(dev_priv);
| 1289 | for_each_gt(gt, dev_priv, i)
| 1290 | intel_gt_driver_remove(gt);
| 1291 | dev_priv->uabi_engines = RB_ROOT;
| 1292 |
| 1293 | /* Flush any outstanding unpin_work. */
| 1294 | i915_gem_drain_workqueue(dev_priv);
| 1295 |
| 1296 | i915_gem_drain_freed_objects(dev_priv);
| 1297 | }
| 1298 |
| 1299 | void i915_gem_driver_release(struct drm_i915_private *dev_priv)
| 1300 | {
| 1301 | struct intel_gt *gt;
| 1302 | unsigned int i;
| 1303 |
| 1304 | for_each_gt(gt, dev_priv, i) {
| 1305 | intel_gt_driver_release(gt);
| 1306 | intel_uc_cleanup_firmwares(&gt->uc);
| 1307 | }
| 1308 |
| 1309 | /* Flush any outstanding work, including i915_gem_context.release_work. */
| 1310 | i915_gem_drain_workqueue(dev_priv);
| 1311 |
| 1312 | drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
| 1313 | }
| 1314 | |||||||
| 1315 | static void i915_gem_init__mm(struct drm_i915_private *i915)
| 1316 | {
| 1317 | mtx_init(&i915->mm.obj_lock, IPL_TTY);
| 1318 |
| 1319 | init_llist_head(&i915->mm.free_list);
| 1320 |
| 1321 | INIT_LIST_HEAD(&i915->mm.purge_list);
| 1322 | INIT_LIST_HEAD(&i915->mm.shrink_list);
| 1323 |
| 1324 | i915_gem_init__objects(i915);
| 1325 | }
| 1326 |
| 1327 | void i915_gem_init_early(struct drm_i915_private *dev_priv)
| 1328 | {
| 1329 | i915_gem_init__mm(dev_priv);
| 1330 | i915_gem_init__contexts(dev_priv);
| 1331 |
| 1332 | mtx_init(&dev_priv->display.fb_tracking.lock, IPL_NONE);
| 1333 | }
| 1334 |
| 1335 | void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
| 1336 | {
| 1337 | i915_gem_drain_freed_objects(dev_priv);
| 1338 | GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
| 1339 | GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
| 1340 | drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
| 1341 | }
| 1342 | |||||||
| 1343 | int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
| 1344 | {
| 1345 | struct drm_i915_file_private *file_priv;
| 1346 | struct i915_drm_client *client;
| 1347 | int ret = -ENOMEM;
| 1348 |
| 1349 | DRM_DEBUG("\n");
| 1350 |
| 1351 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
| 1352 | if (!file_priv)
| 1353 | goto err_alloc;
| 1354 |
| 1355 | client = i915_drm_client_add(&i915->clients);
| 1356 | if (IS_ERR(client)) {
| 1357 | ret = PTR_ERR(client);
| 1358 | goto err_client;
| 1359 | }
| 1360 |
| 1361 | file->driver_priv = file_priv;
| 1362 | file_priv->dev_priv = i915;
| 1363 | file_priv->file = file;
| 1364 | file_priv->client = client;
| 1365 |
| 1366 | file_priv->bsd_engine = -1;
| 1367 | file_priv->hang_timestamp = jiffies;
| 1368 |
| 1369 | ret = i915_gem_context_open(i915, file);
| 1370 | if (ret)
| 1371 | goto err_context;
| 1372 |
| 1373 | return 0;
| 1374 |
| 1375 | err_context:
| 1376 | i915_drm_client_put(client);
| 1377 | err_client:
| 1378 | kfree(file_priv);
| 1379 | err_alloc:
| 1380 | return ret;
| 1381 | }
| 1382 |
| 1383 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
| 1384 | #include "selftests/mock_gem_device.c"
| 1385 | #include "selftests/i915_gem.c"
| 1386 | #endif
| 1 | /************************************************************************** |
| 2 | * |
| 3 | * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. |
| 4 | * Copyright 2016 Intel Corporation |
| 5 | * All Rights Reserved. |
| 6 | * |
| 7 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 8 | * copy of this software and associated documentation files (the |
| 9 | * "Software"), to deal in the Software without restriction, including |
| 10 | * without limitation the rights to use, copy, modify, merge, publish, |
| 11 | * distribute, sub license, and/or sell copies of the Software, and to |
| 12 | * permit persons to whom the Software is furnished to do so, subject to |
| 13 | * the following conditions: |
| 14 | * |
| 15 | * The above copyright notice and this permission notice (including the |
| 16 | * next paragraph) shall be included in all copies or substantial portions |
| 17 | * of the Software. |
| 18 | * |
| 19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 21 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
| 22 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
| 23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
| 24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
| 25 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 26 | * |
| 27 | * |
| 28 | **************************************************************************/ |
| 29 | /* |
| 30 | * Authors: |
| 31 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> |
| 32 | */ |
| 33 | |
| 34 | #ifndef _DRM_MM_H_ |
| 35 | #define _DRM_MM_H_ |
| 36 | |
| 37 | /* |
| 38 | * Generic range manager structs |
| 39 | */ |
| 40 | #include <linux/bug.h> |
| 41 | #include <linux/rbtree.h> |
| 42 | #include <linux/limits.h> |
| 43 | #include <linux/mm_types.h> |
| 44 | #include <linux/list.h> |
| 45 | #include <linux/spinlock.h> |
| 46 | #ifdef CONFIG_DRM_DEBUG_MM |
| 47 | #include <linux/stackdepot.h> |
| 48 | #endif |
| 49 | #include <linux/types.h> |
| 50 | |
| 51 | #include <drm/drm_print.h> |
| 52 | |
| 53 | #ifdef CONFIG_DRM_DEBUG_MM |
| 54 | #define DRM_MM_BUG_ON(expr) BUG_ON(expr)
| 55 | #else
| 56 | #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
| 57 | #endif |
| 58 | |
| 59 | /** |
| 60 | * enum drm_mm_insert_mode - control search and allocation behaviour |
| 61 | * |
| 62 | * The &struct drm_mm range manager supports finding a suitable hole using
| 63 | * a number of search trees. These trees are organised by size, by address and
| 64 | * by most recent eviction order. This allows the user to find either the
| 65 | * smallest hole to reuse, the lowest or highest address to reuse, or simply
| 66 | * reuse the most recent eviction that fits. When allocating the &drm_mm_node
| 67 | * from within the hole, the &drm_mm_insert_mode also dictates whether to
| 68 | * allocate the lowest matching address or the highest.
| 69 | */ |
| 70 | enum drm_mm_insert_mode { |
| 71 | /** |
| 72 | * @DRM_MM_INSERT_BEST: |
| 73 | * |
| 74 | * Search for the smallest hole (within the search range) that fits |
| 75 | * the desired node. |
| 76 | * |
| 77 | * Allocates the node from the bottom of the found hole. |
| 78 | */ |
| 79 | DRM_MM_INSERT_BEST = 0, |
| 80 | |
| 81 | /** |
| 82 | * @DRM_MM_INSERT_LOW: |
| 83 | * |
| 84 | * Search for the lowest hole (address closest to 0, within the search |
| 85 | * range) that fits the desired node. |
| 86 | * |
| 87 | * Allocates the node from the bottom of the found hole. |
| 88 | */ |
| 89 | DRM_MM_INSERT_LOW, |
| 90 | |
| 91 | /** |
| 92 | * @DRM_MM_INSERT_HIGH: |
| 93 | * |
| 94 | * Search for the highest hole (address closest to U64_MAX, within the |
| 95 | * search range) that fits the desired node. |
| 96 | * |
| 97 | * Allocates the node from the *top* of the found hole. The specified |
| 98 | * alignment for the node is applied to the base of the node |
| 99 | * (&drm_mm_node.start). |
| 100 | */ |
| 101 | DRM_MM_INSERT_HIGH, |
| 102 | |
| 103 | /** |
| 104 | * @DRM_MM_INSERT_EVICT: |
| 105 | * |
| 106 | * Search for the most recently evicted hole (within the search range) |
| 107 | * that fits the desired node. This is appropriate for use immediately |
| 108 | * after performing an eviction scan (see drm_mm_scan_init()) and |
| 109 | * removing the selected nodes to form a hole. |
| 110 | * |
| 111 | * Allocates the node from the bottom of the found hole. |
| 112 | */ |
| 113 | DRM_MM_INSERT_EVICT, |
| 114 | |
| 115 | /** |
| 116 | * @DRM_MM_INSERT_ONCE: |
| 117 | * |
| 118 | * Only check the first hole for suitability and report -ENOSPC
| 119 | * immediately otherwise, rather than check every hole until a |
| 120 | * suitable one is found. Can only be used in conjunction with another |
| 121 | * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW. |
| 122 | */ |
| 123 | DRM_MM_INSERT_ONCE = BIT(31),
| 124 | |
| 125 | /** |
| 126 | * @DRM_MM_INSERT_HIGHEST: |
| 127 | * |
| 128 | * Only check the highest hole (the hole with the largest address) and |
| 129 | * insert the node at the top of the hole or report -ENOSPC if |
| 130 | * unsuitable. |
| 131 | * |
| 132 | * Does not search all holes. |
| 133 | */ |
| 134 | DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE, |
| 135 | |
| 136 | /** |
| 137 | * @DRM_MM_INSERT_LOWEST: |
| 138 | * |
| 139 | * Only check the lowest hole (the hole with the smallest address) and |
| 140 | * insert the node at the bottom of the hole or report -ENOSPC if |
| 141 | * unsuitable. |
| 142 | * |
| 143 | * Does not search all holes. |
| 144 | */ |
| 145 | DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE, |
| 146 | }; |
| 147 | |
| 148 | /** |
| 149 | * struct drm_mm_node - allocated block in the DRM allocator |
| 150 | * |
| 151 | * This represents an allocated block in a &drm_mm allocator. Except for |
| 152 | * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is |
| 153 | * entirely opaque and should only be accessed through the provided functions.
| 154 | * Since allocation of these nodes is entirely handled by the driver they can be |
| 155 | * embedded. |
| 156 | */ |
| 157 | struct drm_mm_node { |
| 158 | /** @color: Opaque driver-private tag. */ |
| 159 | unsigned long color; |
| 160 | /** @start: Start address of the allocated block. */ |
| 161 | u64 start; |
| 162 | /** @size: Size of the allocated block. */ |
| 163 | u64 size; |
| 164 | /* private: */ |
| 165 | struct drm_mm *mm; |
| 166 | struct list_head node_list; |
| 167 | struct list_head hole_stack; |
| 168 | struct rb_node rb; |
| 169 | struct rb_node rb_hole_size; |
| 170 | struct rb_node rb_hole_addr; |
| 171 | u64 __subtree_last; |
| 172 | u64 hole_size; |
| 173 | unsigned long flags; |
| 174 | #define DRM_MM_NODE_ALLOCATED_BIT 0
| 175 | #define DRM_MM_NODE_SCANNED_BIT 1
| 176 | #ifdef CONFIG_DRM_DEBUG_MM |
| 177 | depot_stack_handle_t stack; |
| 178 | #endif |
| 179 | }; |
| 180 | |
| 181 | /** |
| 182 | * struct drm_mm - DRM allocator |
| 183 | * |
| 184 | * DRM range allocator with a few special functions and features geared towards |
| 185 | * managing GPU memory. Except for the @color_adjust callback the structure is |
| 186 | * entirely opaque and should only be accessed through the provided functions |
| 187 | * and macros. This structure can be embedded into larger driver structures. |
| 188 | */ |
| 189 | struct drm_mm { |
| 190 | /** |
| 191 | * @color_adjust: |
| 192 | * |
| 193 | * Optional driver callback to further apply restrictions on a hole. The |
| 194 | * node argument points at the node containing the hole from which the |
| 195 | * block would be allocated (see drm_mm_hole_follows() and friends). The |
| 196 | * other arguments are the size of the block to be allocated. The driver |
| 197 | * can adjust the start and end as needed to e.g. insert guard pages. |
| 198 | */ |
| 199 | void (*color_adjust)(const struct drm_mm_node *node, |
| 200 | unsigned long color, |
| 201 | u64 *start, u64 *end); |
| 202 | |
| 203 | /* private: */ |
| 204 | /* List of all memory nodes that immediately precede a free hole. */ |
| 205 | struct list_head hole_stack; |
| 206 | /* head_node.node_list is the list of all memory nodes, ordered |
| 207 | * according to the (increasing) start address of the memory node. */ |
| 208 | struct drm_mm_node head_node; |
| 209 | /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */ |
| 210 | struct rb_root_cached interval_tree; |
| 211 | struct rb_root_cached holes_size; |
| 212 | struct rb_root holes_addr; |
| 213 | |
| 214 | unsigned long scan_active; |
| 215 | }; |
| 216 | |
| 217 | /** |
| 218 | * struct drm_mm_scan - DRM allocator eviction roster data
| 219 | *
| 220 | * This structure tracks data needed for the eviction roster set up using
| 221 | * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and |
| 222 | * drm_mm_scan_remove_block(). The structure is entirely opaque and should only |
| 223 | * be accessed through the provided functions and macros. It is meant to be |
| 224 | * allocated temporarily by the driver on the stack. |
| 225 | */ |
| 226 | struct drm_mm_scan { |
| 227 | /* private: */ |
| 228 | struct drm_mm *mm; |
| 229 | |
| 230 | u64 size; |
| 231 | u64 alignment; |
| 232 | u64 remainder_mask; |
| 233 | |
| 234 | u64 range_start; |
| 235 | u64 range_end; |
| 236 | |
| 237 | u64 hit_start; |
| 238 | u64 hit_end; |
| 239 | |
| 240 | unsigned long color; |
| 241 | enum drm_mm_insert_mode mode; |
| 242 | }; |
| 243 | |
| 244 | /** |
| 245 | * drm_mm_node_allocated - checks whether a node is allocated |
| 246 | * @node: drm_mm_node to check |
| 247 | * |
| 248 | * Drivers are required to clear a node prior to using it with the |
| 249 | * drm_mm range manager. |
| 250 | * |
| 251 | * Drivers should use this helper for proper encapsulation of drm_mm |
| 252 | * internals. |
| 253 | * |
| 254 | * Returns: |
| 255 | * True if the @node is allocated. |
| 256 | */ |
| 257 | static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
| 258 | {
| 259 | return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
| 260 | }
| 261 | |
| 262 | /** |
| 263 | * drm_mm_initialized - checks whether an allocator is initialized |
| 264 | * @mm: drm_mm to check |
| 265 | * |
| 266 | * Drivers should clear the struct drm_mm prior to initialisation if they |
| 267 | * want to use this function. |
| 268 | * |
| 269 | * Drivers should use this helper for proper encapsulation of drm_mm |
| 270 | * internals. |
| 271 | * |
| 272 | * Returns: |
| 273 | * True if the @mm is initialized. |
| 274 | */ |
| 275 | static inline bool drm_mm_initialized(const struct drm_mm *mm)
| 276 | {
| 277 | return READ_ONCE(mm->hole_stack.next);
| 278 | }
| 279 | |
| 280 | /** |
| 281 | * drm_mm_hole_follows - checks whether a hole follows this node |
| 282 | * @node: drm_mm_node to check |
| 283 | * |
| 284 | * Holes are embedded into the drm_mm using the tail of a drm_mm_node. |
| 285 | * If you wish to know whether a hole follows this particular node, |
| 286 | * query this function. See also drm_mm_hole_node_start() and |
| 287 | * drm_mm_hole_node_end(). |
| 288 | * |
| 289 | * Returns: |
| 290 | * True if a hole follows the @node. |
| 291 | */ |
| 292 | static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
| 293 | { |
| 294 | return node->hole_size; |
| 295 | } |
| 296 | |
| 297 | static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) |
| 298 | { |
| 299 | return hole_node->start + hole_node->size; |
| 300 | } |
| 301 | |
| 302 | /** |
| 303 | * drm_mm_hole_node_start - computes the start of the hole following @node |
| 304 | * @hole_node: drm_mm_node which implicitly tracks the following hole |
| 305 | * |
| 306 | * This is useful for driver-specific debug dumpers. Otherwise drivers should |
| 307 | * not inspect holes themselves. Drivers must check first whether a hole indeed |
| 308 | * follows by looking at drm_mm_hole_follows() |
| 309 | * |
| 310 | * Returns: |
| 311 | * Start of the subsequent hole. |
| 312 | */ |
| 313 | static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node) |
| 314 | { |
| 315 | DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
| 316 | return __drm_mm_hole_node_start(hole_node); |
| 317 | } |
| 318 | |
| 319 | static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) |
| 320 | { |
| 321 | return list_next_entry(hole_node, node_list)->start;
| 322 | } |
| 323 | |
| 324 | /** |
| 325 | * drm_mm_hole_node_end - computes the end of the hole following @node |
| 326 | * @hole_node: drm_mm_node which implicitly tracks the following hole |
| 327 | * |
| 328 | * This is useful for driver-specific debug dumpers. Otherwise drivers should |
| 329 | * not inspect holes themselves. Drivers must check first whether a hole indeed |
| 330 | * follows by looking at drm_mm_hole_follows(). |
| 331 | * |
| 332 | * Returns: |
| 333 | * End of the subsequent hole. |
| 334 | */ |
| 335 | static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) |
| 336 | { |
| 337 | return __drm_mm_hole_node_end(hole_node); |
| 338 | } |
| 339 | |
| 340 | /** |
| 341 | * drm_mm_nodes - list of nodes under the drm_mm range manager |
| 342 | * @mm: the struct drm_mm range manager
| 343 | * |
| 344 | * As the drm_mm range manager hides its node_list deep with its |
| 345 | * structure, extracting it looks painful and repetitive. This is |
| 346 | * not expected to be used outside of the drm_mm_for_each_node() |
| 347 | * macros and similar internal functions. |
| 348 | * |
| 349 | * Returns: |
| 350 | * The node list, may be empty. |
| 351 | */ |
| 352 | #define drm_mm_nodes(mm) (&(mm)->head_node.node_list)
| 353 | |
| 354 | /** |
| 355 | * drm_mm_for_each_node - iterator to walk over all allocated nodes |
| 356 | * @entry: &struct drm_mm_node to assign to in each iteration step |
| 357 | * @mm: &drm_mm allocator to walk |
| 358 | * |
| 359 | * This iterator walks over all nodes in the range allocator. It is implemented |
| 360 | * with list_for_each(), so it is not safe against removal of elements.
| 361 | */ |
| 362 | #define drm_mm_for_each_node(entry, mm) \
| 363 | list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
| 364 | |
| 365 | /** |
| 366 | * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes |
| 367 | * @entry: &struct drm_mm_node to assign to in each iteration step |
| 368 | * @next: &struct drm_mm_node to store the next step |
| 369 | * @mm: &drm_mm allocator to walk |
| 370 | * |
| 371 | * This iterator walks over all nodes in the range allocator. It is implemented |
| 372 | * with list_for_each_safe(), so it is safe against removal of elements.
| 373 | */ |
| 374 | #define drm_mm_for_each_node_safe(entry, next, mm) \
| 375 | list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
| 376 | |
| 377 | /** |
| 378 | * drm_mm_for_each_hole - iterator to walk over all holes |
| 379 | * @pos: &drm_mm_node used internally to track progress |
| 380 | * @mm: &drm_mm allocator to walk |
| 381 | * @hole_start: ulong variable to assign the hole start to on each iteration |
| 382 | * @hole_end: ulong variable to assign the hole end to on each iteration |
| 383 | * |
| 384 | * This iterator walks over all holes in the range allocator. It is implemented
| 385 | * with list_for_each(), so it is not safe against removal of elements. @pos is
| 386 | * used internally and will not reflect a real drm_mm_node for the very first
| 387 | * hole. Hence users of this iterator may not access it.
| 388 | * |
| 389 | * Implementation Note: |
| 390 | * We need to inline list_for_each_entry in order to be able to set hole_start |
| 391 | * and hole_end on each iteration while keeping the macro sane. |
| 392 | */ |
| 393 | #define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
| 394 | for (pos = list_first_entry(&(mm)->hole_stack, \
| 395 | typeof(*pos), hole_stack); \
| 396 | &pos->hole_stack != &(mm)->hole_stack ? \
| 397 | hole_start = drm_mm_hole_node_start(pos), \
| 398 | hole_end = hole_start + pos->hole_size, \
| 399 | 1 : 0; \
| 400 | pos = list_next_entry(pos, hole_stack))
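/*
 * A minimal usage sketch (illustrative only; not from this file): walking
 * the free holes from a debug dumper, assuming a drm_printer is at hand.
 * @pos is only an iteration cursor and must not be dereferenced as a real
 * node (see the note above).
 */
static inline void example_dump_holes(struct drm_mm *mm, struct drm_printer *p)
{
	struct drm_mm_node *pos;
	u64 hole_start, hole_end;

	drm_mm_for_each_hole(pos, mm, hole_start, hole_end)
		drm_printf(p, "hole: %llx-%llx\n",
		    (unsigned long long)hole_start,
		    (unsigned long long)hole_end);
}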
| 401 | |
| 402 | /* |
| 403 | * Basic range manager support (drm_mm.c) |
| 404 | */ |
| 405 | int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); |
| 406 | int drm_mm_insert_node_in_range(struct drm_mm *mm, |
| 407 | struct drm_mm_node *node, |
| 408 | u64 size, |
| 409 | u64 alignment, |
| 410 | unsigned long color, |
| 411 | u64 start, |
| 412 | u64 end, |
| 413 | enum drm_mm_insert_mode mode); |
| 414 | |
| 415 | /** |
| 416 | * drm_mm_insert_node_generic - search for space and insert @node |
| 417 | * @mm: drm_mm to allocate from |
| 418 | * @node: preallocate node to insert |
| 419 | * @size: size of the allocation |
| 420 | * @alignment: alignment of the allocation |
| 421 | * @color: opaque tag value to use for this node |
| 422 | * @mode: fine-tune the allocation search and placement |
| 423 | * |
| 424 | * This is a simplified version of drm_mm_insert_node_in_range() with no |
| 425 | * range restrictions applied. |
| 426 | * |
| 427 | * The preallocated node must be cleared to 0. |
| 428 | * |
| 429 | * Returns: |
| 430 | * 0 on success, -ENOSPC if there's no suitable hole. |
| 431 | */ |
| 432 | static inline int |
| 433 | drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, |
| 434 | u64 size, u64 alignment, |
| 435 | unsigned long color, |
| 436 | enum drm_mm_insert_mode mode) |
| 437 | { |
| 438 | return drm_mm_insert_node_in_range(mm, node, |
| 439 | size, alignment, color, |
| 440 | 0, U64_MAX, mode);
| 441 | } |
| 442 | |
| 443 | /** |
| 444 | * drm_mm_insert_node - search for space and insert @node |
| 445 | * @mm: drm_mm to allocate from |
| 446 | * @node: preallocate node to insert |
| 447 | * @size: size of the allocation |
| 448 | * |
| 449 | * This is a simplified version of drm_mm_insert_node_generic() with @color set |
| 450 | * to 0. |
| 451 | * |
| 452 | * The preallocated node must be cleared to 0. |
| 453 | * |
| 454 | * Returns: |
| 455 | * 0 on success, -ENOSPC if there's no suitable hole. |
| 456 | */ |
| 457 | static inline int drm_mm_insert_node(struct drm_mm *mm, |
| 458 | struct drm_mm_node *node, |
| 459 | u64 size) |
| 460 | { |
| 461 | return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0); |
| 462 | } |
| 463 | |
| 464 | void drm_mm_remove_node(struct drm_mm_node *node); |
| 465 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); |
| 466 | void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); |
| 467 | void drm_mm_takedown(struct drm_mm *mm); |
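/*
 * A minimal usage sketch (illustrative only; not from this file): the basic
 * lifecycle of a drm_mm managing a hypothetical 1 MiB aperture. Nodes must
 * be cleared to 0 before insertion; "mgr" and "block" are invented names.
 */
static inline int example_drm_mm_alloc(void)
{
	struct drm_mm mgr;
	struct drm_mm_node block = {};
	int err;

	drm_mm_init(&mgr, 0, 1024 * 1024);

	/* DRM_MM_INSERT_BEST by default: smallest hole that fits 4096 bytes. */
	err = drm_mm_insert_node(&mgr, &block, 4096);
	if (err == 0)
		drm_mm_remove_node(&block);

	drm_mm_takedown(&mgr);
	return err;
}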
| 468 | |
| 469 | /** |
| 470 | * drm_mm_clean - checks whether an allocator is clean |
| 471 | * @mm: drm_mm allocator to check |
| 472 | * |
| 473 | * Returns: |
| 474 | * True if the allocator is completely free, false if there's still a node |
| 475 | * allocated in it. |
| 476 | */ |
| 477 | static inline bool drm_mm_clean(const struct drm_mm *mm)
| 478 | {
| 479 | return list_empty(drm_mm_nodes(mm));
| 480 | }
| 481 | |
| 482 | struct drm_mm_node * |
| 483 | __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last); |
| 484 | |
| 485 | /** |
| 486 | * drm_mm_for_each_node_in_range - iterator to walk over a range of |
| 487 | * allocated nodes |
| 488 | * @node__: drm_mm_node structure to assign to in each iteration step |
| 489 | * @mm__: drm_mm allocator to walk |
| 490 | * @start__: starting offset, the first node will overlap this |
| 491 | * @end__: ending offset, the last node will start before this (but may overlap) |
| 492 | * |
| 493 | * This iterator walks over all nodes in the range allocator that lie |
| 494 | * between @start and @end. It is implemented similarly to list_for_each(), |
| 495 | * but using the internal interval tree to accelerate the search for the |
| 496 | * starting node, and so not safe against removal of elements. It assumes |
| 497 | * that @end is within (or is the upper limit of) the drm_mm allocator. |
| 498 | * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk |
| 499 | * over the special _unallocated_ &drm_mm.head_node, and may even continue |
| 500 | * indefinitely. |
| 501 | */ |
| 502 | #define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \
| 503 | for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
| 504 | node__->start < (end__); \
| 505 | node__ = list_next_entry(node__, node_list))
| 506 | |
| 507 | void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, |
| 508 | struct drm_mm *mm, |
| 509 | u64 size, u64 alignment, unsigned long color, |
| 510 | u64 start, u64 end, |
| 511 | enum drm_mm_insert_mode mode); |
| 512 | |
| 513 | /** |
| 514 | * drm_mm_scan_init - initialize lru scanning |
| 515 | * @scan: scan state |
| 516 | * @mm: drm_mm to scan |
| 517 | * @size: size of the allocation |
| 518 | * @alignment: alignment of the allocation |
| 519 | * @color: opaque tag value to use for the allocation |
| 520 | * @mode: fine-tune the allocation search and placement |
| 521 | * |
| 522 | * This is a simplified version of drm_mm_scan_init_with_range() with no range |
| 523 | * restrictions applied. |
| 524 | * |
| 525 | * This simply sets up the scanning routines with the parameters for the desired |
| 526 | * hole. |
| 527 | * |
| 528 | * Warning: |
| 529 | * As long as the scan list is non-empty, no other operations than |
| 530 | * adding/removing nodes to/from the scan list are allowed. |
| 531 | */ |
| 532 | static inline void drm_mm_scan_init(struct drm_mm_scan *scan, |
| 533 | struct drm_mm *mm, |
| 534 | u64 size, |
| 535 | u64 alignment, |
| 536 | unsigned long color, |
| 537 | enum drm_mm_insert_mode mode) |
| 538 | { |
| 539 | drm_mm_scan_init_with_range(scan, mm, |
| 540 | size, alignment, color, |
| 541 | 0, U64_MAX, mode);
| 542 | } |
| 543 | |
| 544 | bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
| 545 | struct drm_mm_node *node);
| 546 | bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
| 547 | struct drm_mm_node *node);
| 548 | struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan); |
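/*
 * A hedged sketch (illustrative only; not from this file) of the
 * eviction-scan protocol. "struct example_obj" and the caller-owned "lru"
 * list are hypothetical stand-ins for driver state, and drm_mm_remove_node()
 * stands in for the driver's real unbind/evict step.
 */
struct example_obj {
	struct drm_mm_node node;
	struct list_head link;
};

static inline bool example_evict_for(struct drm_mm *mm,
    struct list_head *lru, u64 size)
{
	struct drm_mm_scan scan;
	struct example_obj *obj, *tmp;
	struct list_head scanned;
	bool found = false;

	INIT_LIST_HEAD(&scanned);
	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_EVICT);

	/* Feed LRU nodes to the scan until a large enough hole would form. */
	while (!found && !list_empty(lru)) {
		obj = list_first_entry(lru, struct example_obj, link);
		/* list_move() prepends, so "scanned" ends up in reverse order. */
		list_move(&obj->link, &scanned);
		found = drm_mm_scan_add_block(&scan, &obj->node);
	}

	/*
	 * Every block must be removed from the scan again, in reverse order
	 * of addition; evict those for which removal reports a hit.
	 */
	list_for_each_entry_safe(obj, tmp, &scanned, link) {
		list_move_tail(&obj->link, lru);
		if (drm_mm_scan_remove_block(&scan, &obj->node))
			drm_mm_remove_node(&obj->node);
	}

	return found;
}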
| 549 | |
| 550 | void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p); |
| 551 | |
| 552 | #endif |
| 1 | /* $OpenBSD: atomic.h,v 1.22 2024/01/06 12:52:20 jsg Exp $ */ | |||
| 2 | /** | |||
| 3 | * \file drm_atomic.h | |||
| 4 | * Atomic operations used in the DRM which may or may not be provided by the OS. | |||
| 5 | * | |||
| 6 | * \author Eric Anholt <anholt@FreeBSD.org> | |||
| 7 | */ | |||
| 8 | ||||
| 9 | /*- | |||
| 10 | * Copyright 2004 Eric Anholt | |||
| 11 | * All Rights Reserved. | |||
| 12 | * | |||
| 13 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
| 14 | * copy of this software and associated documentation files (the "Software"), | |||
| 15 | * to deal in the Software without restriction, including without limitation | |||
| 16 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
| 17 | * and/or sell copies of the Software, and to permit persons to whom the | |||
| 18 | * Software is furnished to do so, subject to the following conditions: | |||
| 19 | * | |||
| 20 | * The above copyright notice and this permission notice (including the next | |||
| 21 | * paragraph) shall be included in all copies or substantial portions of the | |||
| 22 | * Software. | |||
| 23 | * | |||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| 25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| 26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
| 27 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |||
| 28 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |||
| 29 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |||
| 30 | * OTHER DEALINGS IN THE SOFTWARE. | |||
| 31 | */ | |||
| 32 | ||||
| 33 | #ifndef _DRM_LINUX_ATOMIC_H_ | |||
| 34 | #define _DRM_LINUX_ATOMIC_H_ | |||
| 35 | ||||
| 36 | #include <sys/types.h> | |||
| 37 | #include <sys/mutex.h> | |||
| 38 | #include <machine/intr.h> | |||
| 39 | #include <linux/types.h> | |||
| 40 | #include <linux/compiler.h> /* via x86/include/asm/atomic.h */ | |||
| 41 | ||||
| 42 | #define ATOMIC_INIT(x) (x)
| 43 | ||||
| 44 | #define atomic_set(p, v) WRITE_ONCE(*(p), (v))
| 45 | #define atomic_read(p) READ_ONCE(*(p))
| 46 | #define atomic_inc(p) __sync_fetch_and_add(p, 1)
| 47 | #define atomic_dec(p) __sync_fetch_and_sub(p, 1)
| 48 | #define atomic_add(n, p) __sync_fetch_and_add(p, n)
| 49 | #define atomic_sub(n, p) __sync_fetch_and_sub(p, n)
| 50 | #define atomic_and(n, p) __sync_fetch_and_and(p, n)
| 51 | #define atomic_or(n, p) atomic_setbits_int(p, n)
| 52 | #define atomic_add_return(n, p) __sync_add_and_fetch(p, n)
| 53 | #define atomic_sub_return(n, p) __sync_sub_and_fetch(p, n)
| 54 | #define atomic_sub_and_test(n, p) (atomic_sub_return(n, p) == 0)
| 55 | #define atomic_inc_return(v) atomic_add_return(1, (v))
| 56 | #define atomic_dec_return(v) atomic_sub_return(1, (v))
| 57 | #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
| 58 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
| 59 | #define atomic_cmpxchg(p, o, n) __sync_val_compare_and_swap(p, o, n)
| 60 | #define cmpxchg(p, o, n) __sync_val_compare_and_swap(p, o, n)
| 61 | #define cmpxchg64(p, o, n) __sync_val_compare_and_swap(p, o, n)
| 62 | #define atomic_set_release(p, v) atomic_set((p), (v))
| 63 | #define atomic_andnot(bits, p) atomic_clearbits_int(p, bits)
| 64 | #define atomic_fetch_inc(p) __sync_fetch_and_add(p, 1)
| 65 | #define atomic_fetch_xor(n, p) __sync_fetch_and_xor(p, n)
| 66 | ||||
| 67 | #define try_cmpxchg(p, op, n) \
| 68 | ({ \ | |||
| 69 | __typeof(p) __op = (__typeof((p)))(op); \ | |||
| 70 | __typeof(*(p)) __o = *__op; \ | |||
| 71 | __typeof(*(p)) __p = __sync_val_compare_and_swap((p), (__o), (n)); \ | |||
| 72 | if (__p != __o) \ | |||
| 73 | *__op = __p; \ | |||
| 74 | (__p == __o); \ | |||
| 75 | }) | |||
| 76 | ||||
| 77 | static inline bool
| 78 | atomic_try_cmpxchg(volatile int *p, int *op, int n) | |||
| 79 | { | |||
| 80 | return try_cmpxchg(p, op, n);
| 81 | } | |||
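/*
 * A minimal sketch (illustrative only; not from this file):
 * atomic_try_cmpxchg() writes the observed value back through @op on
 * failure, which removes the explicit reload from a compare-and-swap retry
 * loop. Hypothetical saturating increment:
 */
static inline int
example_inc_saturate(volatile int *v, int max)
{
	int old = *v;

	do {
		if (old == max)
			return old;	/* already saturated */
	} while (!atomic_try_cmpxchg(v, &old, old + 1));

	return old + 1;
}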
| 82 | ||||
| 83 | static inline int | |||
| 84 | atomic_xchg(volatile int *v, int n) | |||
| 85 | { | |||
| 86 | __sync_synchronize(); | |||
| 87 | return __sync_lock_test_and_set(v, n); | |||
| 88 | } | |||
| 89 | ||||
| 90 | #define xchg(v, n) __sync_lock_test_and_set(v, n)
| 91 | ||||
| 92 | static inline int | |||
| 93 | atomic_add_unless(volatile int *v, int n, int u) | |||
| 94 | { | |||
| 95 | int o; | |||
| 96 | ||||
| 97 | do { | |||
| 98 | o = *v; | |||
| 99 | if (o == u) | |||
| 100 | return 0; | |||
| 101 | } while (__sync_val_compare_and_swap(v, o, o + n) != o);
| 102 | ||||
| 103 | return 1; | |||
| 104 | } | |||
| 105 | ||||
| 106 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
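/*
 * A minimal sketch (illustrative only; not from this file): the classic use
 * of atomic_inc_not_zero() is taking a reference only while the object is
 * still live. "struct example_ref" is hypothetical.
 */
struct example_ref {
	volatile int refcnt;
};

static inline struct example_ref *
example_ref_get(struct example_ref *r)
{
	/* A count of 0 means teardown has begun; do not revive the object. */
	return atomic_inc_not_zero(&r->refcnt) ? r : NULL;
}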
| 107 | ||||
| 108 | static inline int | |||
| 109 | atomic_dec_if_positive(volatile int *v) | |||
| 110 | { | |||
| 111 | int r, o; | |||
| 112 | ||||
| 113 | do { | |||
| 114 | o = *v; | |||
| 115 | r = o - 1; | |||
| 116 | if (r < 0) | |||
| 117 | break; | |||
| 118 | } while (__sync_val_compare_and_swap(v, o, r) != o); | |||
| 119 | ||||
| 120 | return r; | |||
| 121 | } | |||
| 122 | ||||
| 123 | #define atomic_long_read(p) READ_ONCE(*(p))
| 124 | ||||
| 125 | /* 32 bit powerpc lacks 64 bit atomics */ | |||
| 126 | #if !defined(__powerpc__) || defined(__powerpc64__) | |||
| 127 | ||||
| 128 | typedef int64_t atomic64_t; | |||
| 129 | ||||
| 130 | #define ATOMIC64_INIT(x) (x)
| 131 | ||||
| 132 | #define atomic64_set(p, v) WRITE_ONCE(*(p), (v))
| 133 | #define atomic64_read(p) READ_ONCE(*(p))
| 134 | ||||
| 135 | static inline int64_t | |||
| 136 | atomic64_xchg(volatile int64_t *v, int64_t n) | |||
| 137 | { | |||
| 138 | __sync_synchronize(); | |||
| 139 | return __sync_lock_test_and_set(v, n); | |||
| 140 | } | |||
| 141 | ||||
| 142 | static inline int64_t | |||
| 143 | atomic64_cmpxchg(volatile int64_t *v, int64_t o, int64_t n) | |||
| 144 | { | |||
| 145 | return __sync_val_compare_and_swap(v, o, n); | |||
| 146 | } | |||
| 147 | ||||
| 148 | #define atomic64_add(n, p) __sync_fetch_and_add_8(p, n)
| 149 | #define atomic64_sub(n, p) __sync_fetch_and_sub_8(p, n)
| 150 | #define atomic64_inc(p) __sync_fetch_and_add_8(p, 1)
| 151 | #define atomic64_add_return(n, p) __sync_add_and_fetch_8(p, n)
| 152 | #define atomic64_inc_return(p) __sync_add_and_fetch_8(p, 1)
| 153 | ||||
| 154 | #else | |||
| 155 | ||||
| 156 | extern struct mutex atomic64_mtx; | |||
| 157 | ||||
| 158 | typedef struct { | |||
| 159 | volatile int64_t val; | |||
| 160 | } atomic64_t; | |||
| 161 | ||||
| 162 | #define ATOMIC64_INIT(x) { (x) }
| 163 | ||||
| 164 | static inline void | |||
| 165 | atomic64_set(atomic64_t *v, int64_t i)
| 166 | { | |||
| 167 | mtx_enter(&atomic64_mtx); | |||
| 168 | v->val = i; | |||
| 169 | mtx_leave(&atomic64_mtx); | |||
| 170 | } | |||
| 171 | ||||
| 172 | static inline int64_t | |||
| 173 | atomic64_read(atomic64_t *v)
| 174 | { | |||
| 175 | int64_t val; | |||
| 176 | ||||
| 177 | mtx_enter(&atomic64_mtx); | |||
| 178 | val = v->val; | |||
| 179 | mtx_leave(&atomic64_mtx); | |||
| 180 | ||||
| 181 | return val; | |||
| 182 | } | |||
| 183 | ||||
| 184 | static inline int64_t | |||
| 185 | atomic64_xchg(atomic64_t *v, int64_t n) | |||
| 186 | { | |||
| 187 | int64_t val; | |||
| 188 | ||||
| 189 | mtx_enter(&atomic64_mtx); | |||
| 190 | val = v->val; | |||
| 191 | v->val = n; | |||
| 192 | mtx_leave(&atomic64_mtx); | |||
| 193 | ||||
| 194 | return val; | |||
| 195 | } | |||
| 196 | ||||
| 197 | static inline void | |||
| 198 | atomic64_add(int i, atomic64_t *v)
| 199 | { | |||
| 200 | mtx_enter(&atomic64_mtx); | |||
| 201 | v->val += i; | |||
| 202 | mtx_leave(&atomic64_mtx); | |||
| 203 | } | |||
| 204 | ||||
| 205 | #define atomic64_inc(p) atomic64_add(1, p)
| 206 | ||||
| 207 | static inline int64_t | |||
| 208 | atomic64_add_return(int i, atomic64_t *v)
| 209 | { | |||
| 210 | int64_t val; | |||
| 211 | ||||
| 212 | mtx_enter(&atomic64_mtx); | |||
| 213 | val = v->val + i; | |||
| 214 | v->val = val; | |||
| 215 | mtx_leave(&atomic64_mtx); | |||
| 216 | ||||
| 217 | return val; | |||
| 218 | } | |||
| 219 | ||||
| 220 | #define atomic64_inc_return(p) atomic64_add_return(1, p)
| 221 | ||||
| 222 | static inline void | |||
| 223 | atomic64_sub(int i, atomic64_t *v)
| 224 | { | |||
| 225 | mtx_enter(&atomic64_mtx); | |||
| 226 | v->val -= i; | |||
| 227 | mtx_leave(&atomic64_mtx); | |||
| 228 | } | |||
| 229 | #endif | |||
| 230 | ||||
| 231 | #ifdef __LP64__
| 232 | typedef int64_t atomic_long_t; | |||
| 233 | #define atomic_long_set(p, v) atomic64_set(p, v)
| 234 | #define atomic_long_xchg(v, n) atomic64_xchg(v, n)
| 235 | #define atomic_long_cmpxchg(p, o, n) atomic_cmpxchg(p, o, n)
| 236 | #define atomic_long_add(i, v) atomic64_add(i, v)
| 237 | #define atomic_long_sub(i, v) atomic64_sub(i, v)
| 238 | #else | |||
| 239 | typedef int32_t atomic_long_t; | |||
| 240 | #define atomic_long_set(p, v) atomic_set(p, v)
| 241 | #define atomic_long_xchg(v, n) atomic_xchg(v, n)
| 242 | #define atomic_long_cmpxchg(p, o, n) atomic_cmpxchg(p, o, n)
| 243 | #define atomic_long_add(i, v) atomic_add(i, v)
| 244 | #define atomic_long_sub(i, v) atomic_sub(i, v)
| 245 | #endif | |||
| 246 | ||||
| 247 | static inline atomic_t | |||
| 248 | test_and_set_bit(u_int b, volatile void *p) | |||
| 249 | { | |||
| 250 | unsigned int m = 1 << (b & 0x1f); | |||
| 251 | unsigned int prev = __sync_fetch_and_or((volatile u_int *)p + (b >> 5), m); | |||
| 252 | return (prev & m) != 0; | |||
| 253 | } | |||
| 254 | ||||
| 255 | static inline void | |||
| 256 | clear_bit(u_int b, volatile void *p) | |||
| 257 | { | |||
| 258 | atomic_clearbits_int(((volatile u_int *)p) + (b >> 5), 1 << (b & 0x1f));
| 259 | } | |||
| 260 | ||||
| 261 | static inline void | |||
| 262 | clear_bit_unlock(u_int b, volatile void *p) | |||
| 263 | { | |||
| 264 | membar_enter();
| 265 | clear_bit(b, p); | |||
| 266 | } | |||
| 267 | ||||
| 268 | static inline void | |||
| 269 | set_bit(u_int b, volatile void *p) | |||
| 270 | { | |||
| 271 | atomic_setbits_int(((volatile u_int *)p) + (b >> 5), 1 << (b & 0x1f));
| 272 | } | |||
| 273 | ||||
| 274 | static inline void | |||
| 275 | __clear_bit(u_int b, volatile void *p) | |||
| 276 | { | |||
| 277 | volatile u_int *ptr = (volatile u_int *)p; | |||
| 278 | ptr[b >> 5] &= ~(1 << (b & 0x1f)); | |||
| 279 | } | |||
| 280 | ||||
| 281 | static inline void | |||
| 282 | __set_bit(u_int b, volatile void *p) | |||
| 283 | { | |||
| 284 | volatile u_int *ptr = (volatile u_int *)p; | |||
| 285 | ptr[b >> 5] |= (1 << (b & 0x1f)); | |||
| 286 | } | |||
| 287 | ||||
| 288 | static inline int | |||
| 289 | test_bit(u_int b, const volatile void *p) | |||
| 290 | { | |||
| 291 | return !!(((volatile u_int *)p)[b >> 5] & (1 << (b & 0x1f))); | |||
| 292 | } | |||
| 293 | ||||
| 294 | static inline int | |||
| 295 | __test_and_set_bit(u_int b, volatile void *p) | |||
| 296 | { | |||
| 297 | unsigned int m = 1 << (b & 0x1f); | |||
| 298 | volatile u_int *ptr = (volatile u_int *)p; | |||
| 299 | unsigned int prev = ptr[b >> 5]; | |||
| 300 | ptr[b >> 5] |= m; | |||
| 301 | ||||
| 302 | return (prev & m) != 0; | |||
| 303 | } | |||
| 304 | ||||
| 305 | static inline int | |||
| 306 | test_and_clear_bit(u_int b, volatile void *p) | |||
| 307 | { | |||
| 308 | unsigned int m = 1 << (b & 0x1f); | |||
| 309 | unsigned int prev = __sync_fetch_and_and((volatile u_int *)p + (b >> 5), ~m); | |||
| 310 | return (prev & m) != 0; | |||
| 311 | } | |||
| 312 | ||||
| 313 | static inline int | |||
| 314 | __test_and_clear_bit(u_int b, volatile void *p) | |||
| 315 | { | |||
| 316 | volatile u_int *ptr = (volatile u_int *)p; | |||
| 317 | int rv = !!(ptr[b >> 5] & (1 << (b & 0x1f))); | |||
| 318 | ptr[b >> 5] &= ~(1 << (b & 0x1f)); | |||
| 319 | return rv; | |||
| 320 | } | |||
| 321 | ||||
| 322 | static inline int | |||
| 323 | find_first_zero_bit(volatile void *p, int max) | |||
| 324 | { | |||
| 325 | int b; | |||
| 326 | volatile u_int *ptr = (volatile u_int *)p; | |||
| 327 | ||||
| 328 | for (b = 0; b < max; b += 32) { | |||
| 329 | if (ptr[b >> 5] != ~0) { | |||
| 330 | for (;;) { | |||
| 331 | if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0) | |||
| 332 | return b; | |||
| 333 | b++; | |||
| 334 | } | |||
| 335 | } | |||
| 336 | } | |||
| 337 | return max; | |||
| 338 | } | |||
| 339 | ||||
| 340 | static inline int | |||
| 341 | find_next_zero_bit(volatile void *p, int max, int b) | |||
| 342 | { | |||
| 343 | volatile u_int *ptr = (volatile u_int *)p; | |||
| 344 | ||||
| 345 | for (; b < max; b += 32) { | |||
| 346 | if (ptr[b >> 5] != ~0) { | |||
| 347 | for (;;) { | |||
| 348 | if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0) | |||
| 349 | return b; | |||
| 350 | b++; | |||
| 351 | } | |||
| 352 | } | |||
| 353 | } | |||
| 354 | return max; | |||
| 355 | } | |||
| 356 | ||||
| 357 | static inline int | |||
| 358 | find_first_bit(volatile void *p, int max) | |||
| 359 | { | |||
| 360 | int b; | |||
| 361 | volatile u_int *ptr = (volatile u_int *)p; | |||
| 362 | ||||
| 363 | for (b = 0; b < max; b += 32) { | |||
| 364 | if (ptr[b >> 5] != 0) { | |||
| 365 | for (;;) { | |||
| 366 | if (ptr[b >> 5] & (1 << (b & 0x1f))) | |||
| 367 | return b; | |||
| 368 | b++; | |||
| 369 | } | |||
| 370 | } | |||
| 371 | } | |||
| 372 | return max; | |||
| 373 | } | |||
| 374 | ||||
| 375 | static inline int | |||
| 376 | find_next_bit(const volatile void *p, int max, int b) | |||
| 377 | { | |||
| 378 | volatile u_int *ptr = (volatile u_int *)p; | |||
| 379 | ||||
| 380 | for (; b < max; b+= 32) { | |||
| 381 | if (ptr[b >> 5] != 0) { | |||
| 382 | for (;;) { | |||
| 383 | if (ptr[b >> 5] & (1 << (b & 0x1f))) | |||
| 384 | return b; | |||
| 385 | b++; | |||
| 386 | } | |||
| 387 | } | |||
| 388 | } | |||
| 389 | return max; | |||
| 390 | } | |||
| 391 | ||||
| 392 | #define for_each_set_bit(b, p, max) \
| 393 | for ((b) = find_first_bit((p), (max)); \
| 394 | (b) < (max); \
| 395 | (b) = find_next_bit((p), (max), (b) + 1))
| 396 | ||||
| 397 | #define for_each_clear_bit(b, p, max) \
| 398 | for ((b) = find_first_zero_bit((p), (max)); \
| 399 | (b) < (max); \
| 400 | (b) = find_next_zero_bit((p), (max), (b) + 1))
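/*
 * A minimal sketch (illustrative only; not from this file): the
 * find_*_bit() helpers treat the bitmap as an array of 32-bit words, so an
 * N-bit map needs (N + 31) / 32 u_int words. Hypothetical 64-bit map:
 */
static inline int
example_count_set_bits(void)
{
	u_int map[2] = { 0x5, 0x80000000 };	/* bits 0, 2 and 63 set */
	int b, n = 0;

	for_each_set_bit(b, map, 64)		/* visits 0, 2, 63 in order */
		n++;

	return n;				/* 3 */
}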
| 401 | ||||
| 402 | #if defined(__i386__) | |||
| 403 | #define rmb()	__asm volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc") | |||
| 404 | #define wmb()	__asm volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc") | |||
| 405 | #define mb()	__asm volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc") | |||
| 406 | #define smp_mb()	__asm volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc") | |||
| 407 | #define smp_rmb()	__membar("") | |||
| 408 | #define smp_wmb()	__membar("") | |||
| 409 | #define __smp_store_mb(var, value)	do { (void)xchg(&var, value); } while (0) | |||
| 410 | #define smp_mb__after_atomic()	do { } while (0) | |||
| 411 | #define smp_mb__before_atomic()	do { } while (0) | |||
| 412 | #elif defined(__amd64__) | |||
| 413 | #define rmb()	__membar("lfence") | |||
| 414 | #define wmb()	__membar("sfence") | |||
| 415 | #define mb()	__membar("mfence") | |||
| 416 | #define smp_mb()	__asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc") | |||
| 417 | #define smp_rmb()	__membar("") | |||
| 418 | #define smp_wmb()	__membar("") | |||
| 419 | #define __smp_store_mb(var, value)	do { (void)xchg(&var, value); } while (0) | |||
| 420 | #define smp_mb__after_atomic()	do { } while (0) | |||
| 421 | #define smp_mb__before_atomic()	do { } while (0) | |||
| 422 | #elif defined(__aarch64__) | |||
| 423 | #define rmb()	__membar("dsb ld") | |||
| 424 | #define wmb()	__membar("dsb st") | |||
| 425 | #define mb()	__membar("dsb sy") | |||
| 426 | #define dma_rmb()	__membar("dmb oshld") | |||
| 427 | #define dma_wmb()	__membar("dmb oshst") | |||
| 428 | #define dma_mb()	__membar("dmb osh") | |||
| 429 | #elif defined(__arm__) | |||
| 430 | #define rmb()	__membar("dsb sy") | |||
| 431 | #define wmb()	__membar("dsb sy") | |||
| 432 | #define mb()	__membar("dsb sy") | |||
| 433 | #elif defined(__mips64__) | |||
| 434 | #define rmb()	mips_sync() | |||
| 435 | #define wmb()	mips_sync() | |||
| 436 | #define mb()	mips_sync() | |||
| 437 | #elif defined(__powerpc64__) | |||
| 438 | #define rmb()	__membar("sync") | |||
| 439 | #define wmb()	__membar("sync") | |||
| 440 | #define mb()	__membar("sync") | |||
| 441 | #define smp_rmb()	__membar("lwsync") | |||
| 442 | #define smp_wmb()	__membar("lwsync") | |||
| 443 | #elif defined(__powerpc__) | |||
| 444 | #define rmb()	__membar("sync") | |||
| 445 | #define wmb()	__membar("sync") | |||
| 446 | #define mb()	__membar("sync") | |||
| 447 | #define smp_wmb()	__membar("eieio") | |||
| 448 | #elif defined(__riscv) | |||
| 449 | #define rmb()	__membar("fence ir,ir") | |||
| 450 | #define wmb()	__membar("fence ow,ow") | |||
| 451 | #define mb()	__membar("fence iorw,iorw") | |||
| 452 | #define smp_rmb()	__membar("fence r,r") | |||
| 453 | #define smp_wmb()	__membar("fence w,w") | |||
| 454 | #define smp_mb()	__membar("fence rw,rw") | |||
| 455 | #elif defined(__sparc64__) | |||
| 456 | #define rmb()	membar_sync() | |||
| 457 | #define wmb()	membar_sync() | |||
| 458 | #define mb()	membar_sync() | |||
| 459 | #endif | |||
| 460 | ||||
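| | /* | |||
| | * Fallbacks for architectures that did not define the weaker | |||
| | * variants above: degrade to the corresponding full barrier. | |||
| | */ | |||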
| 461 | #ifndef smp_rmb | |||
| 462 | #define smp_rmb()	rmb() | |||
| 463 | #endif | |||
| 464 | ||||
| 465 | #ifndef smp_wmb | |||
| 466 | #define smp_wmb()	wmb() | |||
| 467 | #endif | |||
| 468 | ||||
| 469 | #ifndef mmiowb | |||
| 470 | #define mmiowb()	wmb() | |||
| 471 | #endif | |||
| 472 | ||||
| 473 | #ifndef smp_mb__before_atomic | |||
| 474 | #define smp_mb__before_atomic()	mb() | |||
| 475 | #endif | |||
| 476 | ||||
| 477 | #ifndef smp_mb__after_atomic | |||
| 478 | #define smp_mb__after_atomic()	mb() | |||
| 479 | #endif | |||
| 480 | ||||
| 481 | #ifndef smp_store_mb | |||
| 482 | #define smp_store_mb(x, v)	do { x = v; mb(); } while (0) | |||
| 483 | #endif | |||
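| | /* | |||
| | * Illustrative only: smp_store_mb(flag, 1) stores 1 to flag and | |||
| | * then issues a full barrier, so the store is ordered before any | |||
| | * subsequent load or store on this CPU. | |||
| | */ | |||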
| 484 | ||||
| 485 | #endif |