File: dev/pci/drm/amd/amdgpu/amdgpu_fb.c
Static-analyzer warning: line 254, column 2 — value stored to 'tmp' is never read.
1 | /* |
2 | * Copyright © 2007 David Airlie |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
21 | * DEALINGS IN THE SOFTWARE. |
22 | * |
23 | * Authors: |
24 | * David Airlie |
25 | */ |
26 | |
27 | #include <linux/module.h> |
28 | #include <linux/pm_runtime.h> |
29 | #include <linux/slab.h> |
30 | #include <linux/vga_switcheroo.h> |
31 | |
32 | #include <drm/amdgpu_drm.h> |
33 | #include <drm/drm_crtc.h> |
34 | #include <drm/drm_crtc_helper.h> |
35 | #include <drm/drm_fb_helper.h> |
36 | #include <drm/drm_fourcc.h> |
37 | |
38 | #include "amdgpu.h" |
39 | #include "cikd.h" |
40 | #include "amdgpu_gem.h" |
41 | |
42 | #include "amdgpu_display.h" |
43 | |
44 | /* object hierarchy - |
45 | this contains a helper + a amdgpu fb |
46 | the helper contains a pointer to amdgpu framebuffer baseclass. |
47 | */ |
48 | |
49 | static int |
50 | amdgpufb_open(struct fb_info *info, int user) |
51 | { |
52 | struct drm_fb_helper *fb_helper = info->par; |
53 | int ret = pm_runtime_get_sync(fb_helper->dev->dev); |
54 | if (ret < 0 && ret != -EACCES13) { |
55 | pm_runtime_mark_last_busy(fb_helper->dev->dev); |
56 | pm_runtime_put_autosuspend(fb_helper->dev->dev); |
57 | return ret; |
58 | } |
59 | return 0; |
60 | } |
61 | |
62 | static int |
63 | amdgpufb_release(struct fb_info *info, int user) |
64 | { |
65 | struct drm_fb_helper *fb_helper = info->par; |
66 | |
67 | pm_runtime_mark_last_busy(fb_helper->dev->dev); |
68 | pm_runtime_put_autosuspend(fb_helper->dev->dev); |
69 | return 0; |
70 | } |
71 | |
/*
 * fbdev operations table.  On this port only the default helper ops are
 * wired up; the Linux-only open/release and software drawing hooks are
 * kept under "notyet" until the corresponding infrastructure exists.
 */
static const struct fb_ops amdgpufb_ops = {
#ifdef notyet
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open = amdgpufb_open,
	.fb_release = amdgpufb_release,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
#else
	DRM_FB_HELPER_DEFAULT_OPS,
#endif
};
85 | |
86 | void amdgpu_burner_cb(void *); |
87 | |
/*
 * Align a scanout width to the display hardware's pitch requirement and
 * return the resulting pitch in bytes.
 *
 * The required alignment (in pixels) depends on the bytes-per-pixel of
 * the format.  'adev' and 'tiled' are currently unused but kept for API
 * compatibility with callers.
 */
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
	int pitch_mask;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	default:
		/* unknown cpp: no extra alignment */
		pitch_mask = 0;
		break;
	}

	/* Round width up to the next multiple of (pitch_mask + 1) pixels,
	 * then convert to bytes. */
	return ((width + pitch_mask) & ~pitch_mask) * cpp;
}
110 | |
111 | static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) |
112 | { |
113 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj)({ const __typeof( ((struct amdgpu_bo *)0)->tbo.base ) *__mptr = ((gobj)); (struct amdgpu_bo *)( (char *)__mptr - __builtin_offsetof (struct amdgpu_bo, tbo.base) );}); |
114 | int ret; |
115 | |
116 | ret = amdgpu_bo_reserve(abo, true1); |
117 | if (likely(ret == 0)__builtin_expect(!!(ret == 0), 1)) { |
118 | amdgpu_bo_kunmap(abo); |
119 | amdgpu_bo_unpin(abo); |
120 | amdgpu_bo_unreserve(abo); |
121 | } |
122 | drm_gem_object_put(gobj); |
123 | } |
124 | |
125 | static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, |
126 | struct drm_mode_fb_cmd2 *mode_cmd, |
127 | struct drm_gem_object **gobj_p) |
128 | { |
129 | const struct drm_format_info *info; |
130 | struct amdgpu_device *adev = rfbdev->adev; |
131 | struct drm_gem_object *gobj = NULL((void *)0); |
132 | struct amdgpu_bo *abo = NULL((void *)0); |
133 | bool_Bool fb_tiled = false0; /* useful for testing */ |
134 | u32 tiling_flags = 0, domain; |
135 | int ret; |
136 | int aligned_size, size; |
137 | int height = mode_cmd->height; |
138 | u32 cpp; |
139 | u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED(1 << 0) | |
140 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS(1 << 5) | |
141 | AMDGPU_GEM_CREATE_VRAM_CLEARED(1 << 3); |
142 | |
143 | info = drm_get_format_info(adev_to_drm(adev), mode_cmd); |
144 | cpp = info->cpp[0]; |
145 | |
146 | /* need to align pitch with crtc limits */ |
147 | mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, |
148 | fb_tiled); |
149 | domain = amdgpu_display_supported_domains(adev, flags); |
150 | height = roundup2(mode_cmd->height, 8)(((mode_cmd->height) + ((8) - 1)) & (~((__typeof(mode_cmd ->height))(8) - 1))); |
151 | size = mode_cmd->pitches[0] * height; |
152 | aligned_size = roundup2(size, PAGE_SIZE)(((size) + (((1 << 12)) - 1)) & (~((__typeof(size)) ((1 << 12)) - 1))); |
153 | ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags, |
154 | ttm_bo_type_device, NULL((void *)0), &gobj); |
155 | if (ret) { |
156 | pr_err("failed to allocate framebuffer (%d)\n", aligned_size)printk("\0013" "amdgpu: " "failed to allocate framebuffer (%d)\n" , aligned_size); |
157 | return -ENOMEM12; |
158 | } |
159 | abo = gem_to_amdgpu_bo(gobj)({ const __typeof( ((struct amdgpu_bo *)0)->tbo.base ) *__mptr = ((gobj)); (struct amdgpu_bo *)( (char *)__mptr - __builtin_offsetof (struct amdgpu_bo, tbo.base) );}); |
160 | |
161 | if (fb_tiled) |
162 | tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1)(((__u64)(4) & 0xf) << 0); |
163 | |
164 | ret = amdgpu_bo_reserve(abo, false0); |
165 | if (unlikely(ret != 0)__builtin_expect(!!(ret != 0), 0)) |
166 | goto out_unref; |
167 | |
168 | if (tiling_flags) { |
169 | ret = amdgpu_bo_set_tiling_flags(abo, |
170 | tiling_flags); |
171 | if (ret) |
172 | dev_err(adev->dev, "FB failed to set tiling flags\n")printf("drm:pid%d:%s *ERROR* " "FB failed to set tiling flags\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
173 | } |
174 | |
175 | ret = amdgpu_bo_pin(abo, domain); |
176 | if (ret) { |
177 | amdgpu_bo_unreserve(abo); |
178 | goto out_unref; |
179 | } |
180 | |
181 | ret = amdgpu_ttm_alloc_gart(&abo->tbo); |
182 | if (ret) { |
183 | amdgpu_bo_unreserve(abo); |
184 | dev_err(adev->dev, "%p bind failed\n", abo)printf("drm:pid%d:%s *ERROR* " "%p bind failed\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__ , abo); |
185 | goto out_unref; |
186 | } |
187 | |
188 | ret = amdgpu_bo_kmap(abo, NULL((void *)0)); |
189 | amdgpu_bo_unreserve(abo); |
190 | if (ret) { |
191 | goto out_unref; |
192 | } |
193 | |
194 | *gobj_p = gobj; |
195 | return 0; |
196 | out_unref: |
197 | amdgpufb_destroy_pinned_object(gobj); |
198 | *gobj_p = NULL((void *)0); |
199 | return ret; |
200 | } |
201 | |
202 | static int amdgpufb_create(struct drm_fb_helper *helper, |
203 | struct drm_fb_helper_surface_size *sizes) |
204 | { |
205 | struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper; |
206 | struct amdgpu_device *adev = rfbdev->adev; |
207 | struct fb_info *info; |
208 | struct rasops_info *ri = &adev->ro; |
209 | struct drm_framebuffer *fb = NULL((void *)0); |
210 | struct drm_mode_fb_cmd2 mode_cmd; |
211 | struct drm_gem_object *gobj = NULL((void *)0); |
212 | struct amdgpu_bo *abo = NULL((void *)0); |
213 | int ret; |
214 | unsigned long tmp; |
215 | |
216 | mode_cmd.width = sizes->surface_width; |
217 | mode_cmd.height = sizes->surface_height; |
218 | |
219 | if (sizes->surface_bpp == 24) |
220 | sizes->surface_bpp = 32; |
221 | |
222 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, |
223 | sizes->surface_depth); |
224 | |
225 | ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj); |
226 | if (ret) { |
227 | DRM_ERROR("failed to create fbcon object %d\n", ret)__drm_err("failed to create fbcon object %d\n", ret); |
228 | return ret; |
229 | } |
230 | |
231 | abo = gem_to_amdgpu_bo(gobj)({ const __typeof( ((struct amdgpu_bo *)0)->tbo.base ) *__mptr = ((gobj)); (struct amdgpu_bo *)( (char *)__mptr - __builtin_offsetof (struct amdgpu_bo, tbo.base) );}); |
232 | |
233 | /* okay we have an object now allocate the framebuffer */ |
234 | info = drm_fb_helper_alloc_fbi(helper); |
235 | if (IS_ERR(info)) { |
236 | ret = PTR_ERR(info); |
237 | goto out; |
238 | } |
239 | |
240 | ret = amdgpu_display_framebuffer_init(adev_to_drm(adev), &rfbdev->rfb, |
241 | &mode_cmd, gobj); |
242 | if (ret) { |
243 | DRM_ERROR("failed to initialize framebuffer %d\n", ret)__drm_err("failed to initialize framebuffer %d\n", ret); |
244 | goto out; |
245 | } |
246 | |
247 | fb = &rfbdev->rfb.base; |
248 | |
249 | /* setup helper */ |
250 | rfbdev->helper.fb = fb; |
251 | |
252 | info->fbops = &amdgpufb_ops; |
253 | |
254 | tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start; |
Value stored to 'tmp' is never read | |
255 | #ifdef __linux__ |
256 | info->fix.smem_start = adev->gmc.aper_base + tmp; |
257 | info->fix.smem_len = amdgpu_bo_size(abo); |
258 | info->screen_base = amdgpu_bo_kptr(abo); |
259 | info->screen_size = amdgpu_bo_size(abo); |
260 | #endif |
261 | |
262 | drm_fb_helper_fill_info(info, &rfbdev->helper, sizes); |
263 | |
264 | #ifdef __linux__ |
265 | /* setup aperture base/size for vesafb takeover */ |
266 | info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base; |
267 | info->apertures->ranges[0].size = adev->gmc.aper_size; |
268 | #endif |
269 | |
270 | /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
271 | |
272 | #ifdef __linux__ |
273 | if (info->screen_base == NULL((void *)0)) { |
274 | ret = -ENOSPC28; |
275 | goto out; |
276 | } |
277 | |
278 | DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start)printk("\0016" "[" "drm" "] " "fb mappable at 0x%lX\n", info-> fix.smem_start); |
279 | #endif |
280 | DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base)printk("\0016" "[" "drm" "] " "vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base); |
281 | DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo))printk("\0016" "[" "drm" "] " "size %lu\n", (unsigned long)amdgpu_bo_size (abo)); |
282 | DRM_INFO("fb depth is %d\n", fb->format->depth)printk("\0016" "[" "drm" "] " "fb depth is %d\n", fb->format ->depth); |
283 | DRM_INFO(" pitch is %d\n", fb->pitches[0])printk("\0016" "[" "drm" "] " " pitch is %d\n", fb->pitches [0]); |
284 | |
285 | ri->ri_bits = amdgpu_bo_kptr(abo); |
286 | ri->ri_depth = fb->format->cpp[0] * 8; |
287 | ri->ri_stride = fb->pitches[0]; |
288 | ri->ri_width = sizes->fb_width; |
289 | ri->ri_height = sizes->fb_height; |
290 | |
291 | switch (fb->format->format) { |
292 | case DRM_FORMAT_XRGB8888((__u32)('X') | ((__u32)('R') << 8) | ((__u32)('2') << 16) | ((__u32)('4') << 24)): |
293 | ri->ri_rnum = 8; |
294 | ri->ri_rpos = 16; |
295 | ri->ri_gnum = 8; |
296 | ri->ri_gpos = 8; |
297 | ri->ri_bnum = 8; |
298 | ri->ri_bpos = 0; |
299 | break; |
300 | case DRM_FORMAT_RGB565((__u32)('R') | ((__u32)('G') << 8) | ((__u32)('1') << 16) | ((__u32)('6') << 24)): |
301 | ri->ri_rnum = 5; |
302 | ri->ri_rpos = 11; |
303 | ri->ri_gnum = 6; |
304 | ri->ri_gpos = 5; |
305 | ri->ri_bnum = 5; |
306 | ri->ri_bpos = 0; |
307 | break; |
308 | } |
309 | |
310 | vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info); |
311 | return 0; |
312 | |
313 | out: |
314 | if (abo) { |
315 | |
316 | } |
317 | if (fb && ret) { |
318 | drm_gem_object_put(gobj); |
319 | drm_framebuffer_unregister_private(fb); |
320 | drm_framebuffer_cleanup(fb); |
321 | kfree(fb); |
322 | } |
323 | return ret; |
324 | } |
325 | |
326 | static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) |
327 | { |
328 | struct amdgpu_framebuffer *rfb = &rfbdev->rfb; |
329 | int i; |
330 | |
331 | drm_fb_helper_unregister_fbi(&rfbdev->helper); |
332 | |
333 | if (rfb->base.obj[0]) { |
334 | for (i = 0; i < rfb->base.format->num_planes; i++) |
335 | drm_gem_object_put(rfb->base.obj[0]); |
336 | amdgpufb_destroy_pinned_object(rfb->base.obj[0]); |
337 | rfb->base.obj[0] = NULL((void *)0); |
338 | drm_framebuffer_unregister_private(&rfb->base); |
339 | drm_framebuffer_cleanup(&rfb->base); |
340 | } |
341 | drm_fb_helper_fini(&rfbdev->helper); |
342 | |
343 | return 0; |
344 | } |
345 | |
/* fbdev helper callbacks: only .fb_probe is needed to create the console fb */
static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
	.fb_probe = amdgpufb_create,
};
349 | |
350 | int amdgpu_fbdev_init(struct amdgpu_device *adev) |
351 | { |
352 | struct amdgpu_fbdev *rfbdev; |
353 | int bpp_sel = 32; |
354 | int ret; |
355 | |
356 | /* don't init fbdev on hw without DCE */ |
357 | if (!adev->mode_info.mode_config_initialized) |
358 | return 0; |
359 | |
360 | /* don't init fbdev if there are no connectors */ |
361 | if (list_empty(&adev_to_drm(adev)->mode_config.connector_list)) |
362 | return 0; |
363 | |
364 | /* select 8 bpp console on low vram cards */ |
365 | if (adev->gmc.real_vram_size <= (32*1024*1024)) |
366 | bpp_sel = 8; |
367 | |
368 | rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL(0x0001 | 0x0004)); |
369 | if (!rfbdev) |
370 | return -ENOMEM12; |
371 | |
372 | rfbdev->adev = adev; |
373 | adev->mode_info.rfbdev = rfbdev; |
374 | |
375 | drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper, |
376 | &amdgpu_fb_helper_funcs); |
377 | |
378 | ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper); |
379 | if (ret) { |
380 | kfree(rfbdev); |
381 | return ret; |
382 | } |
383 | |
384 | task_set(&adev->burner_task, amdgpu_burner_cb, adev); |
385 | |
386 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
387 | if (!amdgpu_device_has_dc_support(adev)) |
388 | drm_helper_disable_unused_functions(adev_to_drm(adev)); |
389 | |
390 | drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); |
391 | return 0; |
392 | } |
393 | |
394 | void amdgpu_fbdev_fini(struct amdgpu_device *adev) |
395 | { |
396 | if (!adev->mode_info.rfbdev) |
397 | return; |
398 | |
399 | task_del(systq, &adev->burner_task); |
400 | |
401 | amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev); |
402 | kfree(adev->mode_info.rfbdev); |
403 | adev->mode_info.rfbdev = NULL((void *)0); |
404 | } |
405 | |
406 | void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) |
407 | { |
408 | if (adev->mode_info.rfbdev) |
409 | drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper, |
410 | state); |
411 | } |
412 | |
413 | int amdgpu_fbdev_total_size(struct amdgpu_device *adev) |
414 | { |
415 | struct amdgpu_bo *robj; |
416 | int size = 0; |
417 | |
418 | if (!adev->mode_info.rfbdev) |
419 | return 0; |
420 | |
421 | robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0])({ const __typeof( ((struct amdgpu_bo *)0)->tbo.base ) *__mptr = ((adev->mode_info.rfbdev->rfb.base.obj[0])); (struct amdgpu_bo *)( (char *)__mptr - __builtin_offsetof(struct amdgpu_bo , tbo.base) );}); |
422 | size += amdgpu_bo_size(robj); |
423 | return size; |
424 | } |
425 | |
426 | bool_Bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) |
427 | { |
428 | if (!adev->mode_info.rfbdev) |
429 | return false0; |
430 | if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0])({ const __typeof( ((struct amdgpu_bo *)0)->tbo.base ) *__mptr = ((adev->mode_info.rfbdev->rfb.base.obj[0])); (struct amdgpu_bo *)( (char *)__mptr - __builtin_offsetof(struct amdgpu_bo , tbo.base) );})) |
431 | return true1; |
432 | return false0; |
433 | } |
434 | |
435 | void |
436 | amdgpu_burner(void *v, u_int on, u_int flags) |
437 | { |
438 | struct rasops_info *ri = v; |
439 | struct amdgpu_device *adev = ri->ri_hw; |
440 | |
441 | task_del(systq, &adev->burner_task); |
442 | |
443 | if (on) |
444 | adev->burner_fblank = FB_BLANK_UNBLANK0; |
445 | else { |
446 | if (flags & WSDISPLAY_BURN_VBLANK0x0001) |
447 | adev->burner_fblank = FB_BLANK_VSYNC_SUSPEND3; |
448 | else |
449 | adev->burner_fblank = FB_BLANK_NORMAL1; |
450 | } |
451 | |
452 | /* |
453 | * Setting the DPMS mode may sleep while waiting for vblank so |
454 | * hand things off to a taskq. |
455 | */ |
456 | task_add(systq, &adev->burner_task); |
457 | } |
458 | |
459 | void |
460 | amdgpu_burner_cb(void *arg1) |
461 | { |
462 | struct amdgpu_device *adev = arg1; |
463 | struct drm_fb_helper *helper = &adev->mode_info.rfbdev->helper; |
464 | |
465 | drm_fb_helper_blank(adev->burner_fblank, helper->fbdev); |
466 | } |