File: | dev/pci/drm/i915/gem/i915_gem_dmabuf.c |
Warning: | line 358, column 2 | Value stored to 'ret' is never read |
1 | /* |
2 | * SPDX-License-Identifier: MIT |
3 | * |
4 | * Copyright 2012 Red Hat Inc |
5 | */ |
6 | |
7 | #include <linux/dma-buf.h> |
8 | #include <linux/highmem.h> |
9 | #include <linux/dma-resv.h> |
10 | #include <linux/module.h> |
11 | |
12 | #include <asm/smp.h> |
13 | |
14 | #include "gem/i915_gem_dmabuf.h" |
15 | #include "i915_drv.h" |
16 | #include "i915_gem_object.h" |
17 | #include "i915_scatterlist.h" |
18 | |
19 | MODULE_IMPORT_NS(DMA_BUF); |
20 | |
21 | I915_SELFTEST_DECLARE(static bool force_different_devices;) |
22 | |
23 | static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) |
24 | { |
25 | return to_intel_bo(buf->priv); |
26 | } |
27 | |
28 | #ifdef notyet |
29 | |
30 | static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, |
31 | enum dma_data_direction dir) |
32 | { |
33 | struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); |
34 | struct sg_table *st; |
35 | struct scatterlist *src, *dst; |
36 | int ret, i; |
37 | |
38 | /* Copy sg so that we make an independent mapping */ |
39 | st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); |
40 | if (st == NULL) { |
41 | ret = -ENOMEM; |
42 | goto err; |
43 | } |
44 | |
45 | ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL); |
46 | if (ret) |
47 | goto err_free; |
48 | |
49 | src = obj->mm.pages->sgl; |
50 | dst = st->sgl; |
51 | for (i = 0; i < obj->mm.pages->orig_nents; i++) { |
52 | sg_set_page(dst, sg_page(src), src->length, 0); |
53 | dst = sg_next(dst); |
54 | src = sg_next(src); |
55 | } |
56 | |
57 | ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC); |
58 | if (ret) |
59 | goto err_free_sg; |
60 | |
61 | return st; |
62 | |
63 | err_free_sg: |
64 | sg_free_table(st); |
65 | err_free: |
66 | kfree(st); |
67 | err: |
68 | return ERR_PTR(ret); |
69 | } |
70 | |
71 | static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, |
72 | struct iosys_map *map) |
73 | { |
74 | struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); |
75 | void *vaddr; |
76 | |
77 | vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); |
78 | if (IS_ERR(vaddr)) |
79 | return PTR_ERR(vaddr); |
80 | |
81 | iosys_map_set_vaddr(map, vaddr); |
82 | |
83 | return 0; |
84 | } |
85 | |
86 | static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, |
87 | struct iosys_map *map) |
88 | { |
89 | struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); |
90 | |
91 | i915_gem_object_flush_map(obj); |
92 | i915_gem_object_unpin_map(obj); |
93 | } |
94 | |
95 | static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) |
96 | { |
97 | struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); |
98 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
99 | int ret; |
100 | |
101 | if (obj->base.size < vma->vm_end - vma->vm_start) |
102 | return -EINVAL; |
103 | |
104 | if (HAS_LMEM(i915)) |
105 | return drm_gem_prime_mmap(&obj->base, vma); |
106 | |
107 | if (!obj->base.filp) |
108 | return -ENODEV; |
109 | |
110 | ret = call_mmap(obj->base.filp, vma); |
111 | if (ret) |
112 | return ret; |
113 | |
114 | vma_set_file(vma, obj->base.filp); |
115 | |
116 | return 0; |
117 | } |
118 | |
119 | static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) |
120 | { |
121 | struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); |
122 | bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); |
123 | struct i915_gem_ww_ctx ww; |
124 | int err; |
125 | |
126 | i915_gem_ww_ctx_init(&ww, true); |
127 | retry: |
128 | err = i915_gem_object_lock(obj, &ww); |
129 | if (!err) |
130 | err = i915_gem_object_pin_pages(obj); |
131 | if (!err) { |
132 | err = i915_gem_object_set_to_cpu_domain(obj, write); |
133 | i915_gem_object_unpin_pages(obj); |
134 | } |
135 | if (err == -EDEADLK) { |
136 | err = i915_gem_ww_ctx_backoff(&ww); |
137 | if (!err) |
138 | goto retry; |
139 | } |
140 | i915_gem_ww_ctx_fini(&ww); |
141 | return err; |
142 | } |
143 | |
144 | static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) |
145 | { |
146 | struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); |
147 | struct i915_gem_ww_ctx ww; |
148 | int err; |
149 | |
150 | i915_gem_ww_ctx_init(&ww, true); |
151 | retry: |
152 | err = i915_gem_object_lock(obj, &ww); |
153 | if (!err) |
154 | err = i915_gem_object_pin_pages(obj); |
155 | if (!err) { |
156 | err = i915_gem_object_set_to_gtt_domain(obj, false); |
157 | i915_gem_object_unpin_pages(obj); |
158 | } |
159 | if (err == -EDEADLK) { |
160 | err = i915_gem_ww_ctx_backoff(&ww); |
161 | if (!err) |
162 | goto retry; |
163 | } |
164 | i915_gem_ww_ctx_fini(&ww); |
165 | return err; |
166 | } |
167 | |
168 | static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf, |
169 | struct dma_buf_attachment *attach) |
170 | { |
171 | struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf); |
172 | struct i915_gem_ww_ctx ww; |
173 | int err; |
174 | |
175 | if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM)) |
176 | return -EOPNOTSUPP; |
177 | |
178 | for_i915_gem_ww(&ww, err, true) { |
179 | err = i915_gem_object_lock(obj, &ww); |
180 | if (err) |
181 | continue; |
182 | |
183 | err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM); |
184 | if (err) |
185 | continue; |
186 | |
187 | err = i915_gem_object_wait_migration(obj, 0); |
188 | if (err) |
189 | continue; |
190 | |
191 | err = i915_gem_object_pin_pages(obj); |
192 | } |
193 | |
194 | return err; |
195 | } |
196 | |
197 | static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf, |
198 | struct dma_buf_attachment *attach) |
199 | { |
200 | struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf); |
201 | |
202 | i915_gem_object_unpin_pages(obj); |
203 | } |
204 | |
205 | #endif /* notyet */ |
206 | |
207 | static const struct dma_buf_ops i915_dmabuf_ops = { |
208 | #ifdef notyet |
209 | .attach = i915_gem_dmabuf_attach, |
210 | .detach = i915_gem_dmabuf_detach, |
211 | .map_dma_buf = i915_gem_map_dma_buf, |
212 | .unmap_dma_buf = drm_gem_unmap_dma_buf, |
213 | #endif |
214 | .release = drm_gem_dmabuf_release, |
215 | #ifdef notyet |
216 | .mmap = i915_gem_dmabuf_mmap, |
217 | .vmap = i915_gem_dmabuf_vmap, |
218 | .vunmap = i915_gem_dmabuf_vunmap, |
219 | .begin_cpu_access = i915_gem_begin_cpu_access, |
220 | .end_cpu_access = i915_gem_end_cpu_access, |
221 | #endif |
222 | }; |
223 | |
224 | struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags) |
225 | { |
226 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); |
227 | DEFINE_DMA_BUF_EXPORT_INFO(exp_info); |
228 | |
229 | exp_info.ops = &i915_dmabuf_ops; |
230 | exp_info.size = gem_obj->size; |
231 | exp_info.flags = flags; |
232 | exp_info.priv = gem_obj; |
233 | exp_info.resv = obj->base.resv; |
234 | |
235 | if (obj->ops->dmabuf_export) { |
236 | int ret = obj->ops->dmabuf_export(obj); |
237 | if (ret) |
238 | return ERR_PTR(ret); |
239 | } |
240 | |
241 | return drm_gem_dmabuf_export(gem_obj->dev, &exp_info); |
242 | } |
243 | |
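For context on how this export hook is normally reached (an editor's illustration, not part of the file under review): userspace asks DRM to wrap a GEM handle in a dma-buf through the PRIME ioctl, which libdrm exposes as drmPrimeHandleToFD(). A minimal sketch follows; the helper name export_prime_fd and the pre-existing drm_fd/handle values are assumptions.

/*
 * Editor's sketch (hypothetical helper): export an existing GEM handle
 * as a dma-buf file descriptor.  On i915 the kernel side of this call
 * ends up in i915_gem_prime_export() above.
 */
#include <stdio.h>
#include <stdint.h>
#include <xf86drm.h>

static int export_prime_fd(int drm_fd, uint32_t handle)
{
	int prime_fd = -1;

	/* DRM_RDWR lets the importer mmap the resulting fd read/write. */
	if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC | DRM_RDWR, &prime_fd) != 0) {
		perror("drmPrimeHandleToFD");
		return -1;
	}
	return prime_fd;	/* caller is responsible for close() */
}

The returned fd can then be passed to another process or device and imported there; on a receiving i915 instance that import enters i915_gem_prime_import() below.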
244 | #ifdef notyet |
245 | |
246 | static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) |
247 | { |
248 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
249 | struct sg_table *pages; |
250 | unsigned int sg_page_sizes; |
251 | |
252 | assert_object_held(obj); |
253 | |
254 | pages = dma_buf_map_attachment(obj->base.import_attach, |
255 | DMA_BIDIRECTIONAL); |
256 | if (IS_ERR(pages)) |
257 | return PTR_ERR(pages); |
258 | |
259 | /* |
260 | * DG1 is special here since it still snoops transactions even with |
261 | * CACHE_NONE. This is not the case with other HAS_SNOOP platforms. We |
262 | * might need to revisit this as we add new discrete platforms. |
263 | * |
264 | * XXX: Consider doing a vmap flush or something, where possible. |
265 | * Currently we just do a heavy handed wbinvd_on_all_cpus() here since |
266 | * the underlying sg_table might not even point to struct pages, so we |
267 | * can't just call drm_clflush_sg or similar, like we do elsewhere in |
268 | * the driver. |
269 | */ |
270 | if (i915_gem_object_can_bypass_llc(obj) || |
271 | (!HAS_LLC(i915) && !IS_DG1(i915))) |
272 | wbinvd_on_all_cpus(); |
273 | |
274 | sg_page_sizes = i915_sg_dma_sizes(pages->sgl); |
275 | __i915_gem_object_set_pages(obj, pages, sg_page_sizes); |
276 | |
277 | return 0; |
278 | } |
279 | |
280 | static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj, |
281 | struct sg_table *pages) |
282 | { |
283 | dma_buf_unmap_attachment(obj->base.import_attach, pages, |
284 | DMA_BIDIRECTIONAL); |
285 | } |
286 | |
287 | static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { |
288 | .name = "i915_gem_object_dmabuf", |
289 | .get_pages = i915_gem_object_get_pages_dmabuf, |
290 | .put_pages = i915_gem_object_put_pages_dmabuf, |
291 | }; |
292 | |
293 | #endif /* notyet */ |
294 | |
295 | struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, |
296 | struct dma_buf *dma_buf) |
297 | { |
298 | static struct lock_class_key lock_class; |
299 | struct dma_buf_attachment *attach; |
300 | struct drm_i915_gem_object *obj; |
301 | int ret; |
302 | |
303 | /* is this one of our own objects? */ |
304 | if (dma_buf->ops == &i915_dmabuf_ops) { |
305 | obj = dma_buf_to_obj(dma_buf); |
306 | /* is it from our device? */ |
307 | if (obj->base.dev == dev && |
308 | !I915_SELFTEST_ONLY(force_different_devices)) { |
309 | /* |
310 | * Importing dmabuf exported from our own gem increases |
311 | * refcount on gem itself instead of f_count of dmabuf. |
312 | */ |
313 | return &i915_gem_object_get(obj)->base; |
314 | } |
315 | } |
316 | |
317 | if (i915_gem_object_size_2big(dma_buf->size)) |
318 | return ERR_PTR(-E2BIG); |
319 | |
320 | /* need to attach */ |
321 | attach = dma_buf_attach(dma_buf, dev->dev); |
322 | if (IS_ERR(attach)) |
323 | return ERR_CAST(attach); |
324 | |
325 | #ifdef notyet |
326 | get_dma_buf(dma_buf); |
327 | |
328 | obj = i915_gem_object_alloc(); |
329 | if (obj == NULL) { |
330 | ret = -ENOMEM; |
331 | goto fail_detach; |
332 | } |
333 | |
334 | drm_gem_private_object_init(dev, &obj->base, dma_buf->size); |
335 | i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, |
336 | I915_BO_ALLOC_USER); |
337 | obj->base.import_attach = attach; |
338 | obj->base.resv = dma_buf->resv; |
339 | |
340 | /* We use GTT as shorthand for a coherent domain, one that is |
341 | * neither in the GPU cache nor in the CPU cache, where all |
342 | * writes are immediately visible in memory. (That's not strictly |
343 | * true, but it's close! There are internal buffers such as the |
344 | * write-combined buffer or a delay through the chipset for GTT |
345 | * writes that do require us to treat GTT as a separate cache domain.) |
346 | */ |
347 | obj->read_domains = I915_GEM_DOMAIN_GTT; |
348 | obj->write_domain = 0; |
349 | |
350 | return &obj->base; |
351 | |
352 | fail_detach: |
353 | dma_buf_detach(dma_buf, attach); |
354 | dma_buf_put(dma_buf); |
355 | |
356 | return ERR_PTR(ret); |
357 | #else |
358 | ret = 0; |
Value stored to 'ret' is never read | |
359 | panic(__func__); |
360 | #endif |
361 | } |
362 | |
363 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
364 | #include "selftests/mock_dmabuf.c" |
365 | #include "selftests/i915_gem_dmabuf.c" |
366 | #endif |
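On the warning itself: line 358 sits in the #else branch that this build actually compiles, where panic() does not return, so the value written to 'ret' can never be read; 'ret' and 'lock_class' are only used on the #ifdef notyet path. Below is a minimal sketch of one way to drop the dead store, assuming panic() is declared __dead (noreturn) on this platform. This is an editor's sketch, not the committed code.

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
#ifdef notyet
	/* Only the full import path below needs these. */
	static struct lock_class_key lock_class;
	int ret;
#endif

	/* ... body unchanged up to and including dma_buf_attach() ... */

#ifdef notyet
	/* ... full import path as in lines 326-356 above ... */
#else
	/* panic() never returns, so no placeholder assignment to 'ret' is needed. */
	panic(__func__);
#endif
}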