File: dev/pci/drm/amd/amdgpu/amdgpu_object.h
Warning: line 173, column 24: Value stored to 'adev' during its initialization is never read
(Why this fires: in this port, dev_err() expands to a bare printf() that never evaluates its device argument, so the 'adev' initialized at line 173 is indeed never read.)
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
26 | * Jerome Glisse |
27 | */ |
28 | #ifndef __AMDGPU_OBJECT_H__ |
29 | #define __AMDGPU_OBJECT_H__ |
30 | |
31 | #include <drm/amdgpu_drm.h> |
32 | #include "amdgpu.h" |
33 | #include "amdgpu_res_cursor.h" |
34 | |
35 | #ifdef CONFIG_MMU_NOTIFIER |
36 | #include <linux/mmu_notifier.h> |
37 | #endif |
38 | |
39 | #define AMDGPU_BO_INVALID_OFFSET LONG_MAX |
40 | #define AMDGPU_BO_MAX_PLACEMENTS 3 |
41 | |
42 | /* BO flag to indicate a KFD userptr BO */ |
43 | #define AMDGPU_AMDKFD_CREATE_USERPTR_BO (1ULL << 63) |
44 | |
45 | #define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo) |
46 | #define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo) |
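These casting macros rely on container_of() to walk from an embedded struct amdgpu_bo back to its enclosing wrapper. A minimal sketch of the intended use, assuming the BO was created through amdgpu_bo_create_user() so the enclosing object really is an amdgpu_bo_user ('abo' and 'tiling' are hypothetical locals for illustration):

    /* Recover the wrapper from the embedded BO; only valid when the
     * BO is actually embedded in an amdgpu_bo_user. */
    struct amdgpu_bo_user *ubo = to_amdgpu_bo_user(abo);

    ubo->tiling_flags = tiling;  /* a wrapper-only field, see below */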
47 | |
48 | struct amdgpu_bo_param { |
49 | unsigned long size; |
50 | int byte_align; |
51 | u32 bo_ptr_size; |
52 | u32 domain; |
53 | u32 preferred_domain; |
54 | u64 flags; |
55 | enum ttm_bo_type type; |
56 | bool no_wait_gpu; |
57 | struct dma_resv *resv; |
58 | void (*destroy)(struct ttm_buffer_object *bo); |
59 | }; |
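For orientation, a minimal sketch of how this parameter block feeds amdgpu_bo_create() (declared further down). The field values are plausible choices, not requirements; 'adev' and 'size' are assumed to come from the caller:

    struct amdgpu_bo_param bp;
    struct amdgpu_bo *bo;
    int r;

    memset(&bp, 0, sizeof(bp));
    bp.size = size;
    bp.byte_align = PAGE_SIZE;                  /* CPU-page alignment */
    bp.bo_ptr_size = sizeof(struct amdgpu_bo);  /* plain BO, no wrapper */
    bp.domain = AMDGPU_GEM_DOMAIN_GTT;          /* initial placement */
    bp.type = ttm_bo_type_kernel;               /* not user-mappable */
    bp.resv = NULL;                             /* private reservation */

    r = amdgpu_bo_create(adev, &bp, &bo);
    if (r)
            return r;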
60 | |
61 | /* bo virtual addresses in a vm */ |
62 | struct amdgpu_bo_va_mapping { |
63 | struct amdgpu_bo_va *bo_va; |
64 | struct list_head list; |
65 | struct rb_node rb; |
66 | uint64_t start; |
67 | uint64_t last; |
68 | uint64_t __subtree_last; |
69 | uint64_t offset; |
70 | uint64_t flags; |
71 | }; |
72 | |
73 | /* User space allocated BO in a VM */ |
74 | struct amdgpu_bo_va { |
75 | struct amdgpu_vm_bo_base base; |
76 | |
77 | /* protected by bo being reserved */ |
78 | unsigned ref_count; |
79 | |
80 | /* all other members protected by the VM PD being reserved */ |
81 | struct dma_fence *last_pt_update; |
82 | |
83 | /* mappings for this bo_va */ |
84 | struct list_head invalids; |
85 | struct list_head valids; |
86 | |
87 | /* If the mappings are cleared or filled */ |
88 | bool cleared; |
89 | |
90 | bool is_xgmi; |
91 | }; |
92 | |
93 | struct amdgpu_bo { |
94 | /* Protected by tbo.reserved */ |
95 | u32 preferred_domains; |
96 | u32 allowed_domains; |
97 | struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS]; |
98 | struct ttm_placement placement; |
99 | struct ttm_buffer_object tbo; |
100 | struct ttm_bo_kmap_obj kmap; |
101 | u64 flags; |
102 | /* per VM structure for page tables and with virtual addresses */ |
103 | struct amdgpu_vm_bo_base *vm_bo; |
104 | /* Constant after initialization */ |
105 | struct amdgpu_device *adev; |
106 | struct amdgpu_bo *parent; |
107 | |
108 | #ifdef CONFIG_MMU_NOTIFIER |
109 | struct mmu_interval_notifier notifier; |
110 | #endif |
111 | struct kgd_mem *kfd_bo; |
112 | }; |
113 | |
114 | struct amdgpu_bo_user { |
115 | struct amdgpu_bo bo; |
116 | u64 tiling_flags; |
117 | u64 metadata_flags; |
118 | void *metadata; |
119 | u32 metadata_size; |
120 | |
121 | }; |
122 | |
123 | struct amdgpu_bo_vm { |
124 | struct amdgpu_bo bo; |
125 | struct amdgpu_bo *shadow; |
126 | struct list_head shadow_list; |
127 | struct amdgpu_vm_bo_base entries[]; |
128 | }; |
129 | |
130 | static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) |
131 | { |
132 | return container_of(tbo, struct amdgpu_bo, tbo); |
133 | } |
134 | |
135 | /** |
136 | * amdgpu_mem_type_to_domain - return domain corresponding to mem_type |
137 | * @mem_type: ttm memory type |
138 | * |
139 | * Returns corresponding domain of the ttm mem_type |
140 | */ |
141 | static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type) |
142 | { |
143 | switch (mem_type) { |
144 | case TTM_PL_VRAM: |
145 | return AMDGPU_GEM_DOMAIN_VRAM; |
146 | case TTM_PL_TT: |
147 | return AMDGPU_GEM_DOMAIN_GTT; |
148 | case TTM_PL_SYSTEM: |
149 | return AMDGPU_GEM_DOMAIN_CPU; |
150 | case AMDGPU_PL_GDS: |
151 | return AMDGPU_GEM_DOMAIN_GDS; |
152 | case AMDGPU_PL_GWS: |
153 | return AMDGPU_GEM_DOMAIN_GWS; |
154 | case AMDGPU_PL_OA: |
155 | return AMDGPU_GEM_DOMAIN_OA; |
156 | default: |
157 | break; |
158 | } |
159 | return 0; |
160 | } |
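Typical use is translating a BO's current TTM placement back into the GEM domain flags used throughout the driver. A small sketch, assuming the BO is reserved so tbo.resource is stable:

    u32 domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);

    if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
            /* ... VRAM-specific handling ... */
    }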
161 | |
162 | /** |
163 | * amdgpu_bo_reserve - reserve bo |
164 | * @bo: bo structure |
165 | * @no_intr: don't return -ERESTARTSYS on pending signal |
166 | * |
167 | * Returns: |
168 | * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by |
169 | * a signal. Release all buffer reservations and return to user-space. |
170 | */ |
171 | static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) |
172 | { |
173 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
Value stored to 'adev' during its initialization is never read | |
174 | int r; |
175 | |
176 | r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); |
177 | if (unlikely(r != 0)) { |
178 | if (r != -ERESTARTSYS) |
179 | dev_err(adev->dev, "%p reserve failed\n", bo); |
180 | return r; |
181 | } |
182 | return 0; |
183 | } |
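This helper pairs with amdgpu_bo_unreserve() just below. A sketch of the interruptible pattern, where -ERESTARTSYS must be propagated as the kernel-doc above describes:

    r = amdgpu_bo_reserve(bo, false);  /* interruptible wait */
    if (r)
            return r;                  /* may be -ERESTARTSYS */

    /* ... touch state documented as "Protected by tbo.reserved" ... */

    amdgpu_bo_unreserve(bo);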
184 | |
185 | static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo) |
186 | { |
187 | ttm_bo_unreserve(&bo->tbo); |
188 | } |
189 | |
190 | static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo) |
191 | { |
192 | return bo->tbo.base.size; |
193 | } |
194 | |
195 | static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo) |
196 | { |
197 | return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE; |
198 | } |
199 | |
200 | static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo) |
201 | { |
202 | return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; |
203 | } |
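As a worked example of the two helpers above, take AMDGPU_GPU_PAGE_SIZE as 4096 and PAGE_SHIFT as 12 (the values in this tree, where GPU and CPU pages coincide) and a 64 KiB BO whose page_alignment is one CPU page:

    unsigned npages = amdgpu_bo_ngpu_pages(bo);         /* 65536 / 4096 = 16 */
    unsigned align  = amdgpu_bo_gpu_page_alignment(bo); /* (1 << 12) / 4096 = 1 */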
204 | |
205 | /** |
206 | * amdgpu_bo_mmap_offset - return mmap offset of bo |
207 | * @bo: amdgpu object for which we query the offset |
208 | * |
209 | * Returns mmap offset of the object. |
210 | */ |
211 | static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) |
212 | { |
213 | return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); |
214 | } |
215 | |
216 | /** |
217 | * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM |
218 | */ |
219 | static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) |
220 | { |
221 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
222 | struct amdgpu_res_cursor cursor; |
223 | |
224 | if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM) |
225 | return false; |
226 | |
227 | amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); |
228 | while (cursor.remaining) { |
229 | if (cursor.start < adev->gmc.visible_vram_size) |
230 | return true; |
231 | |
232 | amdgpu_res_next(&cursor, cursor.size); |
233 | } |
234 | |
235 | return false; |
236 | } |
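Callers typically use this to gate CPU access to VRAM-backed BOs. A sketch of that pattern (the error code is illustrative, not prescribed by this header):

    if (bo->tbo.resource->mem_type == TTM_PL_VRAM &&
        !amdgpu_bo_in_cpu_visible_vram(bo))
            return -EINVAL;  /* would first need a move into visible VRAM */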
237 | |
238 | /** |
239 | * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced |
240 | */ |
241 | static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo) |
242 | { |
243 | return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC; |
244 | } |
245 | |
246 | /** |
247 | * amdgpu_bo_encrypted - test if the BO is encrypted |
248 | * @bo: pointer to a buffer object |
249 | * |
250 | * Return true if the buffer object is encrypted, false otherwise. |
251 | */ |
252 | static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo) |
253 | { |
254 | return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED; |
255 | } |
256 | |
257 | /** |
258 | * amdgpu_bo_shadowed - check if the BO is shadowed |
259 | * |
260 | * @bo: BO to be tested. |
261 | * |
262 | * Returns: |
263 | * NULL if not shadowed or else return a BO pointer. |
264 | */ |
265 | static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo) |
266 | { |
267 | if (bo->tbo.type == ttm_bo_type_kernel) |
268 | return to_amdgpu_bo_vm(bo)->shadow; |
269 | |
270 | return NULL; |
271 | } |
272 | |
273 | bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); |
274 | void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain); |
275 | |
276 | int amdgpu_bo_create(struct amdgpu_device *adev, |
277 | struct amdgpu_bo_param *bp, |
278 | struct amdgpu_bo **bo_ptr); |
279 | int amdgpu_bo_create_reserved(struct amdgpu_device *adev, |
280 | unsigned long size, int align, |
281 | u32 domain, struct amdgpu_bo **bo_ptr, |
282 | u64 *gpu_addr, void **cpu_addr); |
283 | int amdgpu_bo_create_kernel(struct amdgpu_device *adev, |
284 | unsigned long size, int align, |
285 | u32 domain, struct amdgpu_bo **bo_ptr, |
286 | u64 *gpu_addr, void **cpu_addr); |
287 | int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, |
288 | uint64_t offset, uint64_t size, |
289 | struct amdgpu_bo **bo_ptr, void **cpu_addr); |
290 | int amdgpu_bo_create_user(struct amdgpu_device *adev, |
291 | struct amdgpu_bo_param *bp, |
292 | struct amdgpu_bo_user **ubo_ptr); |
293 | int amdgpu_bo_create_vm(struct amdgpu_device *adev, |
294 | struct amdgpu_bo_param *bp, |
295 | struct amdgpu_bo_vm **ubo_ptr); |
296 | void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, |
297 | void **cpu_addr); |
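The _kernel pair above bundles create, pin and kmap, with amdgpu_bo_free_kernel() as the symmetric teardown. A minimal sketch of a one-page, CPU-visible scratch buffer, assuming 'adev' is in scope:

    struct amdgpu_bo *bo = NULL;
    u64 gpu_addr;
    void *cpu_ptr;
    int r;

    r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_VRAM,
                                &bo, &gpu_addr, &cpu_ptr);
    if (r)
            return r;

    /* ... write through cpu_ptr, hand gpu_addr to the engine ... */

    amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);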
298 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); |
299 | void *amdgpu_bo_kptr(struct amdgpu_bo *bo); |
300 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo); |
301 | struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); |
302 | void amdgpu_bo_unref(struct amdgpu_bo **bo); |
303 | int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain); |
304 | int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, |
305 | u64 min_offset, u64 max_offset); |
306 | void amdgpu_bo_unpin(struct amdgpu_bo *bo); |
307 | int amdgpu_bo_init(struct amdgpu_device *adev); |
308 | void amdgpu_bo_fini(struct amdgpu_device *adev); |
309 | int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags); |
310 | void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags); |
311 | int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, |
312 | uint32_t metadata_size, uint64_t flags); |
313 | int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, |
314 | size_t buffer_size, uint32_t *metadata_size, |
315 | uint64_t *flags); |
316 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, |
317 | bool evict, |
318 | struct ttm_resource *new_mem); |
319 | void amdgpu_bo_release_notify(struct ttm_buffer_object *bo); |
320 | vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); |
321 | void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, |
322 | bool shared); |
323 | int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, |
324 | enum amdgpu_sync_mode sync_mode, void *owner, |
325 | bool intr); |
326 | int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); |
327 | u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); |
328 | u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); |
329 | void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, |
330 | uint64_t *gtt_mem, uint64_t *cpu_mem); |
331 | void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo); |
332 | int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, |
333 | struct dma_fence **fence); |
334 | uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, |
335 | uint32_t domain); |
336 | |
337 | /* |
338 | * sub allocation |
339 | */ |
340 | |
341 | static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo) |
342 | { |
343 | return sa_bo->manager->gpu_addr + sa_bo->soffset; |
344 | } |
345 | |
346 | static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo) |
347 | { |
348 | return sa_bo->manager->cpu_ptr + sa_bo->soffset; |
349 | } |
350 | |
351 | int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, |
352 | struct amdgpu_sa_manager *sa_manager, |
353 | unsigned size, u32 align, u32 domain); |
354 | void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev, |
355 | struct amdgpu_sa_manager *sa_manager); |
356 | int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, |
357 | struct amdgpu_sa_manager *sa_manager); |
358 | int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, |
359 | struct amdgpu_sa_bo **sa_bo, |
360 | unsigned size, unsigned align); |
361 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, |
362 | struct amdgpu_sa_bo **sa_bo, |
363 | struct dma_fence *fence); |
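Putting the sub-allocation API together: a slice is carved out of a managed BO, addressed through the two inline helpers above, and handed back once a fence signals. A sketch; 'sa_manager' and 'fence' are assumed to exist in the caller:

    struct amdgpu_sa_bo *sa_bo;
    int r;

    r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 16);
    if (r)
            return r;

    uint64_t gpu = amdgpu_sa_bo_gpu_addr(sa_bo);
    void *cpu = amdgpu_sa_bo_cpu_addr(sa_bo);
    /* ... fill through 'cpu', point the ring at 'gpu' ... */

    amdgpu_sa_bo_free(adev, &sa_bo, fence);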
364 | #if defined(CONFIG_DEBUG_FS) |
365 | void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, |
366 | struct seq_file *m); |
367 | u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m); |
368 | #endif |
369 | void amdgpu_debugfs_sa_init(struct amdgpu_device *adev); |
370 | |
371 | bool amdgpu_bo_support_uswc(u64 bo_flags); |
372 | |
373 | |
374 | #endif |