File: dev/pci/drm/i915/gem/i915_gem_phys.c
Warning: line 132, column 13: Value stored to 'dma' during its initialization is never read
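The store is dead only in the non-Linux build: 'dma' is read solely by the dma_free_coherent() call under '#ifdef __linux__' at the end of i915_gem_object_put_pages_phys(), while the OpenBSD '#else' path frees through drm_pci_free(), which takes the whole handle. A minimal sketch of one possible fix (illustrative only, not a committed patch) is to hoist the initialization under the same guard:

    #ifdef __linux__
    	dma_addr_t dma = sg_dma_address(pages->sgl);	/* consumed by dma_free_coherent() below */
    	void *vaddr = sg_page(pages->sgl);		/* get_pages stashed the vaddr in the page slot */
    #else
    	/* drm_pci_free() frees via the handle, so no local dma copy is needed */
    	struct drm_dma_handle *phys = (void *)sg_page(pages->sgl);
    	void *vaddr = phys->vaddr;
    #endif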
1 | /* |
2 | * SPDX-License-Identifier: MIT |
3 | * |
4 | * Copyright © 2014-2016 Intel Corporation |
5 | */ |
6 | |
7 | #include <linux/highmem.h> |
8 | #include <linux/shmem_fs.h> |
9 | #include <linux/swap.h> |
10 | |
11 | #include <drm/drm.h> /* for drm_legacy.h! */ |
12 | #include <drm/drm_cache.h> |
13 | |
14 | #include "gt/intel_gt.h" |
15 | #include "i915_drv.h" |
16 | #include "i915_gem_object.h" |
17 | #include "i915_gem_region.h" |
18 | #include "i915_scatterlist.h" |
19 | |
20 | static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) |
21 | { |
22 | #ifdef __linux__ |
23 | struct address_space *mapping = obj->base.filp->f_mapping; |
24 | #else |
25 | struct drm_dma_handle *phys; |
26 | #endif |
27 | struct scatterlist *sg; |
28 | struct sg_table *st; |
29 | dma_addr_t dma; |
30 | void *vaddr; |
31 | void *dst; |
32 | int i; |
33 | |
34 | if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) |
35 | return -EINVAL; |
36 | |
37 | /* |
38 | * Always aligning to the object size, allows a single allocation |
39 | * to handle all possible callers, and given typical object sizes, |
40 | * the alignment of the buddy allocation will naturally match. |
41 | */ |
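(illustrative note, added: with roundup_pow_of_two(), e.g. a 24 KiB object becomes a 32 KiB allocation, and a 32 KiB buddy block is naturally 32 KiB-aligned, so the one allocation satisfies any caller's alignment) |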
42 | #ifdef __linux__ |
43 | vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev, |
44 | roundup_pow_of_two(obj->base.size), |
45 | &dma, GFP_KERNEL); |
46 | if (!vaddr) |
47 | return -ENOMEM; |
48 | #else |
49 | phys = drm_pci_alloc(obj->base.dev, |
50 | roundup_pow_of_two(obj->base.size), |
51 | roundup_pow_of_two(obj->base.size)); |
52 | if (!phys) |
53 | return -ENOMEM; |
54 | vaddr = phys->vaddr; |
55 | dma = phys->busaddr; |
56 | #endif |
57 | |
58 | st = kmalloc(sizeof(*st), GFP_KERNEL); |
59 | if (!st) |
60 | goto err_pci; |
61 | |
62 | if (sg_alloc_table(st, 1, GFP_KERNEL)) |
63 | goto err_st; |
64 | |
65 | sg = st->sgl; |
66 | sg->offset = 0; |
67 | sg->length = obj->base.size; |
68 | |
69 | #ifdef __linux__ |
70 | sg_assign_page(sg, (struct page *)vaddr); |
71 | #else |
72 | sg_assign_page(sg, (struct vm_page *)phys); |
73 | #endif |
74 | sg_dma_address(sg) = dma; |
75 | sg_dma_len(sg) = obj->base.size; |
76 | |
77 | dst = vaddr; |
78 | for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { |
79 | struct vm_page *page; |
80 | void *src; |
81 | |
82 | #ifdef __linux__ |
83 | page = shmem_read_mapping_page(mapping, i); |
84 | if (IS_ERR(page)) |
85 | goto err_st; |
86 | #else |
87 | struct pglist plist; |
88 | TAILQ_INIT(&plist); |
89 | if (uvm_obj_wire(obj->base.uao, i * PAGE_SIZE, |
90 | (i + 1) * PAGE_SIZE, &plist)) |
91 | goto err_st; |
92 | page = TAILQ_FIRST(&plist)((&plist)->tqh_first); |
93 | #endif |
94 | |
95 | src = kmap_atomic(page); |
96 | memcpy(dst, src, PAGE_SIZE); |
97 | drm_clflush_virt_range(dst, PAGE_SIZE); |
98 | kunmap_atomic(src); |
99 | |
100 | #ifdef __linux__ |
101 | put_page(page); |
102 | #else |
103 | uvm_obj_unwire(obj->base.uao, i * PAGE_SIZE, |
104 | (i + 1) * PAGE_SIZE); |
105 | #endif |
106 | dst += PAGE_SIZE; |
107 | } |
108 | |
109 | intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); |
110 | |
111 | __i915_gem_object_set_pages(obj, st, sg->length); |
112 | |
113 | return 0; |
114 | |
115 | err_st: |
116 | kfree(st); |
117 | err_pci: |
118 | #ifdef __linux__ |
119 | dma_free_coherent(&obj->base.dev->pdev->dev, |
120 | roundup_pow_of_two(obj->base.size), |
121 | vaddr, dma); |
122 | #else |
123 | drm_pci_free(obj->base.dev, phys); |
124 | #endif |
125 | return -ENOMEM; |
126 | } |
127 | |
128 | static void |
129 | i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, |
130 | struct sg_table *pages) |
131 | { |
132 | dma_addr_t dma = sg_dma_address(pages->sgl); |
Value stored to 'dma' during its initialization is never read | |
133 | #ifdef __linux__ |
134 | void *vaddr = sg_page(pages->sgl); |
135 | #else |
136 | struct drm_dma_handle *phys = (void *)sg_page(pages->sgl); |
137 | void *vaddr = phys->vaddr; |
138 | #endif |
139 | |
140 | __i915_gem_object_release_shmem(obj, pages, false); |
141 | |
142 | if (obj->mm.dirty) { |
143 | #ifdef __linux__ |
144 | struct address_space *mapping = obj->base.filp->f_mapping; |
145 | #endif |
146 | void *src = vaddr; |
147 | int i; |
148 | |
149 | for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { |
150 | struct vm_page *page; |
151 | char *dst; |
152 | |
153 | #ifdef __linux__ |
154 | page = shmem_read_mapping_page(mapping, i); |
155 | if (IS_ERR(page)) |
156 | continue; |
157 | #else |
158 | struct pglist plist; |
159 | TAILQ_INIT(&plist); |
160 | if (uvm_obj_wire(obj->base.uao, i * PAGE_SIZE, |
161 | (i + 1) * PAGE_SIZE, &plist)) |
162 | continue; |
163 | page = TAILQ_FIRST(&plist)((&plist)->tqh_first); |
164 | #endif |
165 | |
166 | dst = kmap_atomic(page); |
167 | drm_clflush_virt_range(src, PAGE_SIZE); |
168 | memcpy(dst, src, PAGE_SIZE); |
169 | kunmap_atomic(dst); |
170 | |
171 | set_page_dirty(page); |
172 | #ifdef __linux__ |
173 | if (obj->mm.madv == I915_MADV_WILLNEED) |
174 | mark_page_accessed(page); |
175 | put_page(page); |
176 | #else |
177 | uvm_obj_unwire(obj->base.uao, i * PAGE_SIZE, |
178 | (i + 1) * PAGE_SIZE); |
179 | #endif |
180 | |
181 | src += PAGE_SIZE; |
182 | } |
183 | obj->mm.dirty = false; |
184 | } |
185 | |
186 | sg_free_table(pages); |
187 | kfree(pages); |
188 | |
189 | #ifdef __linux__ |
190 | dma_free_coherent(&obj->base.dev->pdev->dev, |
191 | roundup_pow_of_two(obj->base.size), |
192 | vaddr, dma); |
193 | #else |
194 | drm_pci_free(obj->base.dev, phys); |
195 | #endif |
196 | } |
197 | |
198 | static int |
199 | phys_pwrite(struct drm_i915_gem_object *obj, |
200 | const struct drm_i915_gem_pwrite *args) |
201 | { |
202 | void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; |
203 | char __user *user_data = u64_to_user_ptr(args->data_ptr); |
204 | int err; |
205 | |
206 | err = i915_gem_object_wait(obj, |
207 | I915_WAIT_INTERRUPTIBLE | |
208 | I915_WAIT_ALL, |
209 | MAX_SCHEDULE_TIMEOUT); |
210 | if (err) |
211 | return err; |
212 | |
213 | /* |
214 | * We manually control the domain here and pretend that it |
215 | * remains coherent i.e. in the GTT domain, like shmem_pwrite. |
216 | */ |
217 | i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU); |
218 | |
219 | if (copy_from_user(vaddr, user_data, args->size)) |
220 | return -EFAULT; |
221 | |
222 | drm_clflush_virt_range(vaddr, args->size); |
223 | intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); |
224 | |
225 | i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); |
226 | return 0; |
227 | } |
228 | |
229 | static int |
230 | phys_pread(struct drm_i915_gem_object *obj, |
231 | const struct drm_i915_gem_pread *args) |
232 | { |
233 | void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; |
234 | char __user *user_data = u64_to_user_ptr(args->data_ptr); |
235 | int err; |
236 | |
237 | err = i915_gem_object_wait(obj, |
238 | I915_WAIT_INTERRUPTIBLE, |
239 | MAX_SCHEDULE_TIMEOUT); |
240 | if (err) |
241 | return err; |
242 | |
243 | drm_clflush_virt_range(vaddr, args->size); |
244 | if (copy_to_user(user_data, vaddr, args->size)) |
245 | return -EFAULT; |
246 | |
247 | return 0; |
248 | } |
249 | |
250 | static void phys_release(struct drm_i915_gem_object *obj) |
251 | { |
252 | fput(obj->base.filp); |
253 | } |
254 | |
255 | static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { |
256 | .name = "i915_gem_object_phys", |
257 | .get_pages = i915_gem_object_get_pages_phys, |
258 | .put_pages = i915_gem_object_put_pages_phys, |
259 | |
260 | .pread = phys_pread, |
261 | .pwrite = phys_pwrite, |
262 | |
263 | .release = phys_release, |
264 | }; |
265 | |
266 | int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) |
267 | { |
268 | struct sg_table *pages; |
269 | int err; |
270 | |
271 | if (align > obj->base.size) |
272 | return -EINVAL; |
273 | |
274 | if (obj->ops == &i915_gem_phys_ops) |
275 | return 0; |
276 | |
277 | if (obj->ops != &i915_gem_shmem_ops) |
278 | return -EINVAL; |
279 | |
280 | err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); |
281 | if (err) |
282 | return err; |
283 | |
284 | mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES); |
285 | |
286 | if (obj->mm.madv != I915_MADV_WILLNEED) { |
287 | err = -EFAULT; |
288 | goto err_unlock; |
289 | } |
290 | |
291 | if (obj->mm.quirked) { |
292 | err = -EFAULT; |
293 | goto err_unlock; |
294 | } |
295 | |
296 | if (obj->mm.mapping) { |
297 | err = -EBUSY; |
298 | goto err_unlock; |
299 | } |
300 | |
301 | pages = __i915_gem_object_unset_pages(obj); |
302 | |
303 | obj->ops = &i915_gem_phys_ops; |
304 | |
305 | err = ____i915_gem_object_get_pages(obj); |
306 | if (err) |
307 | goto err_xfer; |
308 | |
309 | /* Perma-pin (until release) the physical set of pages */ |
310 | __i915_gem_object_pin_pages(obj); |
311 | |
312 | if (!IS_ERR_OR_NULL(pages)) |
313 | i915_gem_shmem_ops.put_pages(obj, pages); |
314 | |
315 | i915_gem_object_release_memory_region(obj); |
316 | |
317 | mutex_unlock(&obj->mm.lock); |
318 | return 0; |
319 | |
320 | err_xfer: |
321 | obj->ops = &i915_gem_shmem_ops; |
322 | if (!IS_ERR_OR_NULL(pages)) { |
323 | unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl); |
324 | |
325 | __i915_gem_object_set_pages(obj, pages, sg_page_sizes); |
326 | } |
327 | err_unlock: |
328 | mutex_unlock(&obj->mm.lock); |
329 | return err; |
330 | } |
331 | |
332 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
333 | #include "selftests/i915_gem_phys.c" |
334 | #endif |