File: | dev/pci/drm/i915/display/skl_watermark.c |
Warning: | line 3572, column 20 Value stored to 'minor' during its initialization is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | // SPDX-License-Identifier: MIT |
2 | /* |
3 | * Copyright © 2022 Intel Corporation |
4 | */ |
5 | |
6 | #include <drm/drm_blend.h> |
7 | |
8 | #include "intel_atomic.h" |
9 | #include "intel_atomic_plane.h" |
10 | #include "intel_bw.h" |
11 | #include "intel_de.h" |
12 | #include "intel_display.h" |
13 | #include "intel_display_power.h" |
14 | #include "intel_display_types.h" |
15 | #include "intel_fb.h" |
16 | #include "skl_watermark.h" |
17 | |
18 | #include "i915_drv.h" |
19 | #include "i915_fixed.h" |
20 | #include "i915_reg.h" |
21 | #include "intel_pcode.h" |
22 | #include "intel_pm.h" |
23 | |
24 | static void skl_sagv_disable(struct drm_i915_privateinteldrm_softc *i915); |
25 | |
/* Stores plane specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;		/* surface tiling: X-tile / Y-tile */
	bool rc_surface;		/* render-compressed surface — TODO confirm */
	bool is_planar;			/* planar (two-plane, e.g. NV12) format — presumably; verify at fill site */
	u32 width;			/* plane width used for WM math */
	u8 cpp;				/* bytes per pixel */
	u32 plane_pixel_rate;		/* adjusted pixel rate for this plane */
	u32 y_min_scanlines;		/* minimum scanlines per Y-tile fetch */
	u32 plane_bytes_per_line;	/* bytes fetched per line */
	uint_fixed_16_16_t plane_blocks_per_line;	/* 16.16 fixed point */
	uint_fixed_16_16_t y_tile_minimum;		/* 16.16 fixed point */
	u32 linetime_us;		/* line time in microseconds */
	u32 dbuf_block_size;		/* DBUF block granularity in bytes */
};
41 | |
42 | u8 intel_enabled_dbuf_slices_mask(struct drm_i915_privateinteldrm_softc *i915) |
43 | { |
44 | u8 enabled_slices = 0; |
45 | enum dbuf_slice slice; |
46 | |
47 | for_each_dbuf_slice(i915, slice)for ((slice) = DBUF_S1; (slice) < I915_MAX_DBUF_SLICES; (slice )++) if (!((&(i915)->__info)->display.dbuf.slice_mask & (1UL << (slice)))) {} else { |
48 | if (intel_uncore_read(&i915->uncore, |
49 | DBUF_CTL_S(slice)((const i915_reg_t){ .reg = ((((const u32 []){ 0x45008, 0x44FE8 , 0x44300, 0x44304 })[slice])) })) & DBUF_POWER_STATE((u32)((1UL << (30)) + 0))) |
50 | enabled_slices |= BIT(slice)(1UL << (slice)); |
51 | } |
52 | |
53 | return enabled_slices; |
54 | } |
55 | |
56 | /* |
57 | * FIXME: We still don't have the proper code detect if we need to apply the WA, |
58 | * so assume we'll always need it in order to avoid underruns. |
59 | */ |
60 | static bool_Bool skl_needs_memory_bw_wa(struct drm_i915_privateinteldrm_softc *i915) |
61 | { |
62 | return DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 9; |
63 | } |
64 | |
65 | static bool_Bool |
66 | intel_has_sagv(struct drm_i915_privateinteldrm_softc *i915) |
67 | { |
68 | return DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 9 && !IS_LP(i915)((&(i915)->__info)->is_lp) && |
69 | i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED; |
70 | } |
71 | |
72 | static u32 |
73 | intel_sagv_block_time(struct drm_i915_privateinteldrm_softc *i915) |
74 | { |
75 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 14) { |
76 | u32 val; |
77 | |
78 | val = intel_uncore_read(&i915->uncore, MTL_LATENCY_SAGV((const i915_reg_t){ .reg = (0x4578b) })); |
79 | |
80 | return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (0))) + 0))))(((val) & (((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (0))) + 0)) )) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - ( 12) - 1)) & ((~0UL) << (0))) + 0))) - 1)))); |
81 | } else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 12) { |
82 | u32 val = 0; |
83 | int ret; |
84 | |
85 | ret = snb_pcode_read(&i915->uncore, |
86 | GEN12_PCODE_READ_SAGV_BLOCK_TIME_US0x23, |
87 | &val, NULL((void *)0)); |
88 | if (ret) { |
89 | drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Couldn't read SAGV block time!\n" ); |
90 | return 0; |
91 | } |
92 | |
93 | return val; |
94 | } else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 11) { |
95 | return 10; |
96 | } else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 9 && !IS_LP(i915)((&(i915)->__info)->is_lp)) { |
97 | return 30; |
98 | } else { |
99 | return 0; |
100 | } |
101 | } |
102 | |
103 | static void intel_sagv_init(struct drm_i915_privateinteldrm_softc *i915) |
104 | { |
105 | if (!intel_has_sagv(i915)) |
106 | i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; |
107 | |
108 | /* |
109 | * Probe to see if we have working SAGV control. |
110 | * For icl+ this was already determined by intel_bw_init_hw(). |
111 | */ |
112 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) < 11) |
113 | skl_sagv_disable(i915); |
114 | |
115 | drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN)({ int __ret = !!((i915->display.sagv.status == I915_SAGV_UNKNOWN )); if (__ret) printf("%s %s: " "%s", dev_driver_string(((& i915->drm))->dev), "", "drm_WARN_ON(" "i915->display.sagv.status == I915_SAGV_UNKNOWN" ")"); __builtin_expect(!!(__ret), 0); }); |
116 | |
117 | i915->display.sagv.block_time_us = intel_sagv_block_time(i915); |
118 | |
119 | drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "SAGV supported: %s, original SAGV block time: %u us\n" , str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us ) |
120 | str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "SAGV supported: %s, original SAGV block time: %u us\n" , str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us ); |
121 | |
122 | /* avoid overflow when adding with wm0 latency/etc. */ |
123 | if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,({ int __ret = !!(i915->display.sagv.block_time_us > 0xffff ); if (__ret) printf("%s %s: " "Excessive SAGV block time %u, ignoring\n" , dev_driver_string((&i915->drm)->dev), "", i915-> display.sagv.block_time_us); __builtin_expect(!!(__ret), 0); } ) |
124 | "Excessive SAGV block time %u, ignoring\n",({ int __ret = !!(i915->display.sagv.block_time_us > 0xffff ); if (__ret) printf("%s %s: " "Excessive SAGV block time %u, ignoring\n" , dev_driver_string((&i915->drm)->dev), "", i915-> display.sagv.block_time_us); __builtin_expect(!!(__ret), 0); } ) |
125 | i915->display.sagv.block_time_us)({ int __ret = !!(i915->display.sagv.block_time_us > 0xffff ); if (__ret) printf("%s %s: " "Excessive SAGV block time %u, ignoring\n" , dev_driver_string((&i915->drm)->dev), "", i915-> display.sagv.block_time_us); __builtin_expect(!!(__ret), 0); } )) |
126 | i915->display.sagv.block_time_us = 0; |
127 | |
128 | if (!intel_has_sagv(i915)) |
129 | i915->display.sagv.block_time_us = 0; |
130 | } |
131 | |
132 | /* |
133 | * SAGV dynamically adjusts the system agent voltage and clock frequencies |
134 | * depending on power and performance requirements. The display engine access |
135 | * to system memory is blocked during the adjustment time. Because of the |
136 | * blocking time, having this enabled can cause full system hangs and/or pipe |
137 | * underruns if we don't meet all of the following requirements: |
138 | * |
139 | * - <= 1 pipe enabled |
140 | * - All planes can enable watermarks for latencies >= SAGV engine block time |
141 | * - We're not using an interlaced display configuration |
142 | */ |
143 | static void skl_sagv_enable(struct drm_i915_privateinteldrm_softc *i915) |
144 | { |
145 | int ret; |
146 | |
147 | if (!intel_has_sagv(i915)) |
148 | return; |
149 | |
150 | if (i915->display.sagv.status == I915_SAGV_ENABLED) |
151 | return; |
152 | |
153 | drm_dbg_kms(&i915->drm, "Enabling SAGV\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Enabling SAGV\n"); |
154 | ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,snb_pcode_write_timeout(&i915->uncore, 0x21, 0x3, 500, 0) |
155 | GEN9_SAGV_ENABLE)snb_pcode_write_timeout(&i915->uncore, 0x21, 0x3, 500, 0); |
156 | |
157 | /* We don't need to wait for SAGV when enabling */ |
158 | |
159 | /* |
160 | * Some skl systems, pre-release machines in particular, |
161 | * don't actually have SAGV. |
162 | */ |
163 | if (IS_SKYLAKE(i915)IS_PLATFORM(i915, INTEL_SKYLAKE) && ret == -ENXIO6) { |
164 | drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_DRIVER, "No SAGV found on system, ignoring\n" ); |
165 | i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; |
166 | return; |
167 | } else if (ret < 0) { |
168 | drm_err(&i915->drm, "Failed to enable SAGV\n")printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to enable SAGV\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
169 | return; |
170 | } |
171 | |
172 | i915->display.sagv.status = I915_SAGV_ENABLED; |
173 | } |
174 | |
175 | static void skl_sagv_disable(struct drm_i915_privateinteldrm_softc *i915) |
176 | { |
177 | int ret; |
178 | |
179 | if (!intel_has_sagv(i915)) |
180 | return; |
181 | |
182 | if (i915->display.sagv.status == I915_SAGV_DISABLED) |
183 | return; |
184 | |
185 | drm_dbg_kms(&i915->drm, "Disabling SAGV\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Disabling SAGV\n"); |
186 | /* bspec says to keep retrying for at least 1 ms */ |
187 | ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL0x21, |
188 | GEN9_SAGV_DISABLE0x0, |
189 | GEN9_SAGV_IS_DISABLED0x1, GEN9_SAGV_IS_DISABLED0x1, |
190 | 1); |
191 | /* |
192 | * Some skl systems, pre-release machines in particular, |
193 | * don't actually have SAGV. |
194 | */ |
195 | if (IS_SKYLAKE(i915)IS_PLATFORM(i915, INTEL_SKYLAKE) && ret == -ENXIO6) { |
196 | drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_DRIVER, "No SAGV found on system, ignoring\n" ); |
197 | i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; |
198 | return; |
199 | } else if (ret < 0) { |
200 | drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "Failed to disable SAGV (%d)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , ret); |
201 | return; |
202 | } |
203 | |
204 | i915->display.sagv.status = I915_SAGV_DISABLED; |
205 | } |
206 | |
207 | static void skl_sagv_pre_plane_update(struct intel_atomic_state *state) |
208 | { |
209 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
210 | const struct intel_bw_state *new_bw_state = |
211 | intel_atomic_get_new_bw_state(state); |
212 | |
213 | if (!new_bw_state) |
214 | return; |
215 | |
216 | if (!intel_can_enable_sagv(i915, new_bw_state)) |
217 | skl_sagv_disable(i915); |
218 | } |
219 | |
220 | static void skl_sagv_post_plane_update(struct intel_atomic_state *state) |
221 | { |
222 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
223 | const struct intel_bw_state *new_bw_state = |
224 | intel_atomic_get_new_bw_state(state); |
225 | |
226 | if (!new_bw_state) |
227 | return; |
228 | |
229 | if (intel_can_enable_sagv(i915, new_bw_state)) |
230 | skl_sagv_enable(i915); |
231 | } |
232 | |
233 | static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) |
234 | { |
235 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
236 | const struct intel_bw_state *old_bw_state = |
237 | intel_atomic_get_old_bw_state(state); |
238 | const struct intel_bw_state *new_bw_state = |
239 | intel_atomic_get_new_bw_state(state); |
240 | u16 old_mask, new_mask; |
241 | |
242 | if (!new_bw_state) |
243 | return; |
244 | |
245 | old_mask = old_bw_state->qgv_points_mask; |
246 | new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; |
247 | |
248 | if (old_mask == new_mask) |
249 | return; |
250 | |
251 | WARN_ON(!new_bw_state->base.changed)({ int __ret = !!(!new_bw_state->base.changed); if (__ret) printf("WARNING %s failed at %s:%d\n", "!new_bw_state->base.changed" , "/usr/src/sys/dev/pci/drm/i915/display/skl_watermark.c", 251 ); __builtin_expect(!!(__ret), 0); }); |
252 | |
253 | drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Restricting QGV points: 0x%x -> 0x%x\n" , old_mask, new_mask) |
254 | old_mask, new_mask)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Restricting QGV points: 0x%x -> 0x%x\n" , old_mask, new_mask); |
255 | |
256 | /* |
257 | * Restrict required qgv points before updating the configuration. |
258 | * According to BSpec we can't mask and unmask qgv points at the same |
259 | * time. Also masking should be done before updating the configuration |
260 | * and unmasking afterwards. |
261 | */ |
262 | icl_pcode_restrict_qgv_points(i915, new_mask); |
263 | } |
264 | |
265 | static void icl_sagv_post_plane_update(struct intel_atomic_state *state) |
266 | { |
267 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
268 | const struct intel_bw_state *old_bw_state = |
269 | intel_atomic_get_old_bw_state(state); |
270 | const struct intel_bw_state *new_bw_state = |
271 | intel_atomic_get_new_bw_state(state); |
272 | u16 old_mask, new_mask; |
273 | |
274 | if (!new_bw_state) |
275 | return; |
276 | |
277 | old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; |
278 | new_mask = new_bw_state->qgv_points_mask; |
279 | |
280 | if (old_mask == new_mask) |
281 | return; |
282 | |
283 | WARN_ON(!new_bw_state->base.changed)({ int __ret = !!(!new_bw_state->base.changed); if (__ret) printf("WARNING %s failed at %s:%d\n", "!new_bw_state->base.changed" , "/usr/src/sys/dev/pci/drm/i915/display/skl_watermark.c", 283 ); __builtin_expect(!!(__ret), 0); }); |
284 | |
285 | drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Relaxing QGV points: 0x%x -> 0x%x\n" , old_mask, new_mask) |
286 | old_mask, new_mask)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Relaxing QGV points: 0x%x -> 0x%x\n" , old_mask, new_mask); |
287 | |
288 | /* |
289 | * Allow required qgv points after updating the configuration. |
290 | * According to BSpec we can't mask and unmask qgv points at the same |
291 | * time. Also masking should be done before updating the configuration |
292 | * and unmasking afterwards. |
293 | */ |
294 | icl_pcode_restrict_qgv_points(i915, new_mask); |
295 | } |
296 | |
297 | void intel_sagv_pre_plane_update(struct intel_atomic_state *state) |
298 | { |
299 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
300 | |
301 | /* |
302 | * Just return if we can't control SAGV or don't have it. |
303 | * This is different from situation when we have SAGV but just can't |
304 | * afford it due to DBuf limitation - in case if SAGV is completely |
305 | * disabled in a BIOS, we are not even allowed to send a PCode request, |
306 | * as it will throw an error. So have to check it here. |
307 | */ |
308 | if (!intel_has_sagv(i915)) |
309 | return; |
310 | |
311 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11) |
312 | icl_sagv_pre_plane_update(state); |
313 | else |
314 | skl_sagv_pre_plane_update(state); |
315 | } |
316 | |
317 | void intel_sagv_post_plane_update(struct intel_atomic_state *state) |
318 | { |
319 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
320 | |
321 | /* |
322 | * Just return if we can't control SAGV or don't have it. |
323 | * This is different from situation when we have SAGV but just can't |
324 | * afford it due to DBuf limitation - in case if SAGV is completely |
325 | * disabled in a BIOS, we are not even allowed to send a PCode request, |
326 | * as it will throw an error. So have to check it here. |
327 | */ |
328 | if (!intel_has_sagv(i915)) |
329 | return; |
330 | |
331 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11) |
332 | icl_sagv_post_plane_update(state); |
333 | else |
334 | skl_sagv_post_plane_update(state); |
335 | } |
336 | |
337 | static bool_Bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) |
338 | { |
339 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );}); |
340 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev); |
341 | enum plane_id plane_id; |
342 | int max_level = INT_MAX0x7fffffff; |
343 | |
344 | if (!intel_has_sagv(i915)) |
345 | return false0; |
346 | |
347 | if (!crtc_state->hw.active) |
348 | return true1; |
349 | |
350 | if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE(1<<4)) |
351 | return false0; |
352 | |
353 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { |
354 | const struct skl_plane_wm *wm = |
355 | &crtc_state->wm.skl.optimal.planes[plane_id]; |
356 | int level; |
357 | |
358 | /* Skip this plane if it's not enabled */ |
359 | if (!wm->wm[0].enable) |
360 | continue; |
361 | |
362 | /* Find the highest enabled wm level for this plane */ |
363 | for (level = ilk_wm_max_level(i915); |
364 | !wm->wm[level].enable; --level) |
365 | { } |
366 | |
367 | /* Highest common enabled wm level for all planes */ |
368 | max_level = min(level, max_level)(((level)<(max_level))?(level):(max_level)); |
369 | } |
370 | |
371 | /* No enabled planes? */ |
372 | if (max_level == INT_MAX0x7fffffff) |
373 | return true1; |
374 | |
375 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { |
376 | const struct skl_plane_wm *wm = |
377 | &crtc_state->wm.skl.optimal.planes[plane_id]; |
378 | |
379 | /* |
380 | * All enabled planes must have enabled a common wm level that |
381 | * can tolerate memory latencies higher than sagv_block_time_us |
382 | */ |
383 | if (wm->wm[0].enable && !wm->wm[max_level].can_sagv) |
384 | return false0; |
385 | } |
386 | |
387 | return true1; |
388 | } |
389 | |
390 | static bool_Bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) |
391 | { |
392 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );}); |
393 | enum plane_id plane_id; |
394 | |
395 | if (!crtc_state->hw.active) |
396 | return true1; |
397 | |
398 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { |
399 | const struct skl_plane_wm *wm = |
400 | &crtc_state->wm.skl.optimal.planes[plane_id]; |
401 | |
402 | if (wm->wm[0].enable && !wm->sagv.wm0.enable) |
403 | return false0; |
404 | } |
405 | |
406 | return true1; |
407 | } |
408 | |
409 | static bool_Bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) |
410 | { |
411 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );}); |
412 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev); |
413 | |
414 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 12) |
415 | return tgl_crtc_can_enable_sagv(crtc_state); |
416 | else |
417 | return skl_crtc_can_enable_sagv(crtc_state); |
418 | } |
419 | |
420 | bool_Bool intel_can_enable_sagv(struct drm_i915_privateinteldrm_softc *i915, |
421 | const struct intel_bw_state *bw_state) |
422 | { |
423 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) < 11 && |
424 | bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes)(((bw_state->active_pipes) != 0) && (((bw_state-> active_pipes) - 1) & (bw_state->active_pipes)) == 0)) |
425 | return false0; |
426 | |
427 | return bw_state->pipe_sagv_reject == 0; |
428 | } |
429 | |
430 | static int intel_compute_sagv_mask(struct intel_atomic_state *state) |
431 | { |
432 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
433 | int ret; |
434 | struct intel_crtc *crtc; |
435 | struct intel_crtc_state *new_crtc_state; |
436 | struct intel_bw_state *new_bw_state = NULL((void *)0); |
437 | const struct intel_bw_state *old_bw_state = NULL((void *)0); |
438 | int i; |
439 | |
440 | for_each_new_intel_crtc_in_state(state, crtc,for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else |
441 | new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else { |
442 | new_bw_state = intel_atomic_get_bw_state(state); |
443 | if (IS_ERR(new_bw_state)) |
444 | return PTR_ERR(new_bw_state); |
445 | |
446 | old_bw_state = intel_atomic_get_old_bw_state(state); |
447 | |
448 | if (intel_crtc_can_enable_sagv(new_crtc_state)) |
449 | new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe)(1UL << (crtc->pipe)); |
450 | else |
451 | new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe)(1UL << (crtc->pipe)); |
452 | } |
453 | |
454 | if (!new_bw_state) |
455 | return 0; |
456 | |
457 | new_bw_state->active_pipes = |
458 | intel_calc_active_pipes(state, old_bw_state->active_pipes); |
459 | |
460 | if (new_bw_state->active_pipes != old_bw_state->active_pipes) { |
461 | ret = intel_atomic_lock_global_state(&new_bw_state->base); |
462 | if (ret) |
463 | return ret; |
464 | } |
465 | |
466 | if (intel_can_enable_sagv(i915, new_bw_state) != |
467 | intel_can_enable_sagv(i915, old_bw_state)) { |
468 | ret = intel_atomic_serialize_global_state(&new_bw_state->base); |
469 | if (ret) |
470 | return ret; |
471 | } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { |
472 | ret = intel_atomic_lock_global_state(&new_bw_state->base); |
473 | if (ret) |
474 | return ret; |
475 | } |
476 | |
477 | for_each_new_intel_crtc_in_state(state, crtc,for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else |
478 | new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else { |
479 | struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; |
480 | |
481 | /* |
482 | * We store use_sagv_wm in the crtc state rather than relying on |
483 | * that bw state since we have no convenient way to get at the |
484 | * latter from the plane commit hooks (especially in the legacy |
485 | * cursor case) |
486 | */ |
487 | pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915)(((&(i915)->__runtime)->display.ip.ver) >= 13 && !((&(i915)->__info)->is_dgfx)) && |
488 | DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 12 && |
489 | intel_can_enable_sagv(i915, new_bw_state); |
490 | } |
491 | |
492 | return 0; |
493 | } |
494 | |
495 | static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, |
496 | u16 start, u16 end) |
497 | { |
498 | entry->start = start; |
499 | entry->end = end; |
500 | |
501 | return end; |
502 | } |
503 | |
504 | static int intel_dbuf_slice_size(struct drm_i915_privateinteldrm_softc *i915) |
505 | { |
506 | return INTEL_INFO(i915)(&(i915)->__info)->display.dbuf.size / |
507 | hweight8(INTEL_INFO(i915)(&(i915)->__info)->display.dbuf.slice_mask); |
508 | } |
509 | |
510 | static void |
511 | skl_ddb_entry_for_slices(struct drm_i915_privateinteldrm_softc *i915, u8 slice_mask, |
512 | struct skl_ddb_entry *ddb) |
513 | { |
514 | int slice_size = intel_dbuf_slice_size(i915); |
515 | |
516 | if (!slice_mask) { |
517 | ddb->start = 0; |
518 | ddb->end = 0; |
519 | return; |
520 | } |
521 | |
522 | ddb->start = (ffs(slice_mask) - 1) * slice_size; |
523 | ddb->end = fls(slice_mask) * slice_size; |
524 | |
525 | WARN_ON(ddb->start >= ddb->end)({ int __ret = !!(ddb->start >= ddb->end); if (__ret ) printf("WARNING %s failed at %s:%d\n", "ddb->start >= ddb->end" , "/usr/src/sys/dev/pci/drm/i915/display/skl_watermark.c", 525 ); __builtin_expect(!!(__ret), 0); }); |
526 | WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size)({ int __ret = !!(ddb->end > (&(i915)->__info)-> display.dbuf.size); if (__ret) printf("WARNING %s failed at %s:%d\n" , "ddb->end > (&(i915)->__info)->display.dbuf.size" , "/usr/src/sys/dev/pci/drm/i915/display/skl_watermark.c", 526 ); __builtin_expect(!!(__ret), 0); }); |
527 | } |
528 | |
529 | static unsigned int mbus_ddb_offset(struct drm_i915_privateinteldrm_softc *i915, u8 slice_mask) |
530 | { |
531 | struct skl_ddb_entry ddb; |
532 | |
533 | if (slice_mask & (BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)))) |
534 | slice_mask = BIT(DBUF_S1)(1UL << (DBUF_S1)); |
535 | else if (slice_mask & (BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)))) |
536 | slice_mask = BIT(DBUF_S3)(1UL << (DBUF_S3)); |
537 | |
538 | skl_ddb_entry_for_slices(i915, slice_mask, &ddb); |
539 | |
540 | return ddb.start; |
541 | } |
542 | |
543 | u32 skl_ddb_dbuf_slice_mask(struct drm_i915_privateinteldrm_softc *i915, |
544 | const struct skl_ddb_entry *entry) |
545 | { |
546 | int slice_size = intel_dbuf_slice_size(i915); |
547 | enum dbuf_slice start_slice, end_slice; |
548 | u8 slice_mask = 0; |
549 | |
550 | if (!skl_ddb_entry_size(entry)) |
551 | return 0; |
552 | |
553 | start_slice = entry->start / slice_size; |
554 | end_slice = (entry->end - 1) / slice_size; |
555 | |
556 | /* |
557 | * Per plane DDB entry can in a really worst case be on multiple slices |
558 | * but single entry is anyway contigious. |
559 | */ |
560 | while (start_slice <= end_slice) { |
561 | slice_mask |= BIT(start_slice)(1UL << (start_slice)); |
562 | start_slice++; |
563 | } |
564 | |
565 | return slice_mask; |
566 | } |
567 | |
568 | static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state) |
569 | { |
570 | const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; |
571 | int hdisplay, vdisplay; |
572 | |
573 | if (!crtc_state->hw.active) |
574 | return 0; |
575 | |
576 | /* |
577 | * Watermark/ddb requirement highly depends upon width of the |
578 | * framebuffer, So instead of allocating DDB equally among pipes |
579 | * distribute DDB based on resolution/width of the display. |
580 | */ |
581 | drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay); |
582 | |
583 | return hdisplay; |
584 | } |
585 | |
586 | static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state, |
587 | enum pipe for_pipe, |
588 | unsigned int *weight_start, |
589 | unsigned int *weight_end, |
590 | unsigned int *weight_total) |
591 | { |
592 | struct drm_i915_privateinteldrm_softc *i915 = |
593 | to_i915(dbuf_state->base.state->base.dev); |
594 | enum pipe pipe; |
595 | |
596 | *weight_start = 0; |
597 | *weight_end = 0; |
598 | *weight_total = 0; |
599 | |
600 | for_each_pipe(i915, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!( (&(i915)->__runtime)->pipe_mask & (1UL << (pipe)))) {} else { |
601 | int weight = dbuf_state->weight[pipe]; |
602 | |
603 | /* |
604 | * Do not account pipes using other slice sets |
605 | * luckily as of current BSpec slice sets do not partially |
606 | * intersect(pipes share either same one slice or same slice set |
607 | * i.e no partial intersection), so it is enough to check for |
608 | * equality for now. |
609 | */ |
610 | if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe]) |
611 | continue; |
612 | |
613 | *weight_total += weight; |
614 | if (pipe < for_pipe) { |
615 | *weight_start += weight; |
616 | *weight_end += weight; |
617 | } else if (pipe == for_pipe) { |
618 | *weight_end += weight; |
619 | } |
620 | } |
621 | } |
622 | |
/*
 * Allocate a DDB (display data buffer) range for @crtc from the dbuf
 * slice(s) assigned to it in the new dbuf state.
 *
 * The pipe's share of the slice range is proportional to its weight
 * relative to the total weight of all pipes sharing those slices
 * (weights come from intel_crtc_dbuf_weights()).  A pipe with weight 0
 * gets an empty (0, 0) allocation.  If neither the slice assignment nor
 * the resulting DDB entry changed, the function returns early without
 * locking the global dbuf state.
 *
 * Returns 0 on success, or a negative error code from
 * intel_atomic_lock_global_state()/intel_atomic_get_crtc_state().
 */
static int
skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev);
	unsigned int weight_total, weight_start, weight_end;
	const struct intel_dbuf_state *old_dbuf_state =
		intel_atomic_get_old_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_old_global_obj_state(state, & to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );});
	struct intel_dbuf_state *new_dbuf_state =
		intel_atomic_get_new_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_new_global_obj_state(state, & to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );});
	struct intel_crtc_state *crtc_state;
	struct skl_ddb_entry ddb_slices;
	enum pipe pipe = crtc->pipe;
	unsigned int mbus_offset = 0;
	u32 ddb_range_size;
	u32 dbuf_slice_mask;
	u32 start, end;
	int ret;

	/* Pipe carries no weight (e.g. inactive): give it an empty allocation. */
	if (new_dbuf_state->weight[pipe] == 0) {
		skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
		goto out;
	}

	dbuf_slice_mask = new_dbuf_state->slices[pipe];

	skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
	mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
	ddb_range_size = skl_ddb_entry_size(&ddb_slices);

	intel_crtc_dbuf_weights(new_dbuf_state, pipe,
				&weight_start, &weight_end, &weight_total);

	/* Carve out this pipe's weight-proportional share of the slice range. */
	start = ddb_range_size * weight_start / weight_total;
	end = ddb_range_size * weight_end / weight_total;

	/* The stored entry is MBUS-relative (mbus_offset subtracted). */
	skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
			   ddb_slices.start - mbus_offset + start,
			   ddb_slices.start - mbus_offset + end);

out:
	/* No change for this pipe -> no need to lock the global dbuf state. */
	if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
	    skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
				&new_dbuf_state->ddb[pipe]))
		return 0;

	ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
	if (ret)
		return ret;

	crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/*
	 * Used for checking overlaps, so we need absolute
	 * offsets instead of MBUS relative offsets.
	 */
	crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
	crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;

	drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n" , crtc->base.base.id, crtc->base.name, old_dbuf_state-> slices[pipe], new_dbuf_state->slices[pipe], old_dbuf_state ->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, new_dbuf_state ->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, old_dbuf_state ->active_pipes, new_dbuf_state->active_pipes)
		    "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n" , crtc->base.base.id, crtc->base.name, old_dbuf_state-> slices[pipe], new_dbuf_state->slices[pipe], old_dbuf_state ->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, new_dbuf_state ->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, old_dbuf_state ->active_pipes, new_dbuf_state->active_pipes)
		    crtc->base.base.id, crtc->base.name,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n" , crtc->base.base.id, crtc->base.name, old_dbuf_state-> slices[pipe], new_dbuf_state->slices[pipe], old_dbuf_state ->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, new_dbuf_state ->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, old_dbuf_state ->active_pipes, new_dbuf_state->active_pipes)
		    old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n" , crtc->base.base.id, crtc->base.name, old_dbuf_state-> slices[pipe], new_dbuf_state->slices[pipe], old_dbuf_state ->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, new_dbuf_state ->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, old_dbuf_state ->active_pipes, new_dbuf_state->active_pipes)
		    old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n" , crtc->base.base.id, crtc->base.name, old_dbuf_state-> slices[pipe], new_dbuf_state->slices[pipe], old_dbuf_state ->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, new_dbuf_state ->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, old_dbuf_state ->active_pipes, new_dbuf_state->active_pipes)
		    new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n" , crtc->base.base.id, crtc->base.name, old_dbuf_state-> slices[pipe], new_dbuf_state->slices[pipe], old_dbuf_state ->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, new_dbuf_state ->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, old_dbuf_state ->active_pipes, new_dbuf_state->active_pipes)
		    old_dbuf_state->active_pipes, new_dbuf_state->active_pipes)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n" , crtc->base.base.id, crtc->base.name, old_dbuf_state-> slices[pipe], new_dbuf_state->slices[pipe], old_dbuf_state ->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, new_dbuf_state ->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, old_dbuf_state ->active_pipes, new_dbuf_state->active_pipes);

	return 0;
}
693 | |
694 | static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, |
695 | int width, const struct drm_format_info *format, |
696 | u64 modifier, unsigned int rotation, |
697 | u32 plane_pixel_rate, struct skl_wm_params *wp, |
698 | int color_plane); |
699 | |
700 | static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, |
701 | struct intel_plane *plane, |
702 | int level, |
703 | unsigned int latency, |
704 | const struct skl_wm_params *wp, |
705 | const struct skl_wm_level *result_prev, |
706 | struct skl_wm_level *result /* out */); |
707 | |
/*
 * Compute the minimum DDB allocation (in blocks) needed by the cursor plane.
 *
 * Watermark parameters are computed for a worst-case 256-wide ARGB8888
 * linear, unrotated cursor at the crtc's pixel rate, then each watermark
 * level is evaluated with its latency; the loop stops at the first level
 * whose min_ddb_alloc is U16_MAX (i.e. the level is unusable).  Note the
 * same &wm is passed as both result_prev and result, so each level builds
 * on the previous one.
 *
 * Returns the larger of the computed minimum and a floor of 32 blocks when
 * only one pipe is active (8 blocks otherwise).
 */
static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
		      int num_active)
{
	struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor)({ const __typeof( ((struct intel_plane *)0)->base ) *__mptr = (crtc_state->uapi.crtc->cursor); (struct intel_plane *)( (char *)__mptr - __builtin_offsetof(struct intel_plane, base ) );});
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(i915);
	struct skl_wm_level wm = {};
	int ret, min_ddb_alloc = 0;
	struct skl_wm_params wp;

	ret = skl_compute_wm_params(crtc_state, 256,
				    drm_format_info(DRM_FORMAT_ARGB8888((__u32)('A') | ((__u32)('R') << 8) | ((__u32)('2') << 16) | ((__u32)('4') << 24))),
				    DRM_FORMAT_MOD_LINEAR((((__u64)0) << 56) | ((0) & 0x00ffffffffffffffULL) ),
				    DRM_MODE_ROTATE_0(1<<0),
				    crtc_state->pixel_rate, &wp, 0);
	drm_WARN_ON(&i915->drm, ret)({ int __ret = !!((ret)); if (__ret) printf("%s %s: " "%s", dev_driver_string (((&i915->drm))->dev), "", "drm_WARN_ON(" "ret" ")" ); __builtin_expect(!!(__ret), 0); });

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = i915->display.wm.skl_latency[level];

		skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
		/* U16_MAX marks an unusable level; keep the last usable value. */
		if (wm.min_ddb_alloc == U16_MAX0xffff)
			break;

		min_ddb_alloc = wm.min_ddb_alloc;
	}

	return max(num_active == 1 ? 32 : 8, min_ddb_alloc)(((num_active == 1 ? 32 : 8)>(min_ddb_alloc))?(num_active == 1 ? 32 : 8):(min_ddb_alloc));
}
738 | |
/*
 * Decode a DDB entry from a PLANE_BUF_CFG/CUR_BUF_CFG register value.
 * A non-zero end is incremented by one.
 * NOTE(review): presumably the hardware encodes the end inclusively and
 * this converts it to the exclusive form used by skl_ddb_entry_size() —
 * confirm against Bspec.
 */
static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	skl_ddb_entry_init(entry,
			   REG_FIELD_GET(PLANE_BUF_START_MASK, reg)((u32)((typeof(((u32)((((~0UL) >> (64 - (11) - 1)) & ((~0UL) << (0))) + 0))))(((reg) & (((u32)((((~0UL) >> (64 - (11) - 1)) & ((~0UL) << (0))) + 0)) )) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - ( 11) - 1)) & ((~0UL) << (0))) + 0))) - 1)))),
			   REG_FIELD_GET(PLANE_BUF_END_MASK, reg)((u32)((typeof(((u32)((((~0UL) >> (64 - (27) - 1)) & ((~0UL) << (16))) + 0))))(((reg) & (((u32)((((~0UL ) >> (64 - (27) - 1)) & ((~0UL) << (16))) + 0 )))) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (27) - 1)) & ((~0UL) << (16))) + 0))) - 1)))));
	if (entry->end)
		entry->end++;
}
747 | |
/*
 * Read the current DDB allocation of one plane from the hardware.
 *
 * @ddb receives the main (UV/packed) allocation; @ddb_y receives the
 * Y-plane allocation, which only exists as a separate register
 * (PLANE_NV12_BUF_CFG) on display version < 11.  The cursor has a single
 * dedicated register (CUR_BUF_CFG) and no planar variant.
 */
static void
skl_ddb_get_hw_plane_state(struct drm_i915_privateinteldrm_softc *i915,
			   const enum pipe pipe,
			   const enum plane_id plane_id,
			   struct skl_ddb_entry *ddb,
			   struct skl_ddb_entry *ddb_y)
{
	u32 val;

	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
	if (plane_id == PLANE_CURSOR) {
		val = intel_uncore_read(&i915->uncore, CUR_BUF_CFG(pipe)((const i915_reg_t){ .reg = (((0x7017c) + (pipe) * ((0x7117c) - (0x7017c)))) }));
		skl_ddb_entry_init_from_hw(ddb, val);
		return;
	}

	val = intel_uncore_read(&i915->uncore, PLANE_BUF_CFG(pipe, plane_id)((const i915_reg_t){ .reg = (((((0x7027c) + (pipe) * ((0x7127c ) - (0x7027c)))) + (plane_id) * ((((0x7037c) + (pipe) * ((0x7137c ) - (0x7037c)))) - (((0x7027c) + (pipe) * ((0x7127c) - (0x7027c ))))))) }));
	skl_ddb_entry_init_from_hw(ddb, val);

	/* No separate Y-plane register on display version 11+. */
	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11)
		return;

	val = intel_uncore_read(&i915->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id)((const i915_reg_t){ .reg = (((((0x70278) + (pipe) * ((0x71278 ) - (0x70278)))) + (plane_id) * ((((0x70378) + (pipe) * ((0x71378 ) - (0x70378)))) - (((0x70278) + (pipe) * ((0x71278) - (0x70278 ))))))) }));
	skl_ddb_entry_init_from_hw(ddb_y, val);
}
773 | |
/*
 * Read the DDB allocations of every plane on @crtc from the hardware.
 *
 * @ddb and @ddb_y are arrays indexed by plane_id.  The pipe's power
 * domain must be powered for the register reads; if it cannot be
 * acquired the arrays are left untouched and the function returns
 * silently.
 */
static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
				      struct skl_ddb_entry *ddb,
				      struct skl_ddb_entry *ddb_y)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;
	enum plane_id plane_id;

	power_domain = POWER_DOMAIN_PIPE(pipe)((pipe) + POWER_DOMAIN_PIPE_A);
	wakeref = intel_display_power_get_if_enabled(i915, power_domain);
	if (!wakeref)
		return;

	for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else
		skl_ddb_get_hw_plane_state(i915, pipe,
					   plane_id,
					   &ddb[plane_id],
					   &ddb_y[plane_id]);

	intel_display_power_put(i915, power_domain, wakeref);
}
797 | |
/*
 * One row of a platform's DBuf slice assignment table: for a given set
 * of active pipes, which dbuf slice(s) each pipe should use.
 */
struct dbuf_slice_conf_entry {
	u8 active_pipes;		/* bitmask of active pipes this row applies to */
	u8 dbuf_mask[I915_MAX_PIPES];	/* per-pipe bitmask of DBUF_S* slices */
	bool_Bool join_mbus;			/* whether MBUS should be joined for this config */
};
803 | |
804 | /* |
805 | * Table taken from Bspec 12716 |
806 | * Pipes do have some preferred DBuf slice affinity, |
807 | * plus there are some hardcoded requirements on how |
808 | * those should be distributed for multipipe scenarios. |
809 | * For more DBuf slices algorithm can get even more messy |
810 | * and less readable, so decided to use a table almost |
811 | * as is from BSpec itself - that way it is at least easier |
812 | * to compare, change and check. |
813 | */ |
814 | static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = |
815 | /* Autogenerated with igt/tools/intel_dbuf_map tool: */ |
816 | { |
817 | { |
818 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)), |
819 | .dbuf_mask = { |
820 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
821 | }, |
822 | }, |
823 | { |
824 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)), |
825 | .dbuf_mask = { |
826 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
827 | }, |
828 | }, |
829 | { |
830 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)), |
831 | .dbuf_mask = { |
832 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
833 | [PIPE_B] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
834 | }, |
835 | }, |
836 | { |
837 | .active_pipes = BIT(PIPE_C)(1UL << (PIPE_C)), |
838 | .dbuf_mask = { |
839 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
840 | }, |
841 | }, |
842 | { |
843 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_C)(1UL << (PIPE_C)), |
844 | .dbuf_mask = { |
845 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
846 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
847 | }, |
848 | }, |
849 | { |
850 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)), |
851 | .dbuf_mask = { |
852 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
853 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
854 | }, |
855 | }, |
856 | { |
857 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)), |
858 | .dbuf_mask = { |
859 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
860 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
861 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
862 | }, |
863 | }, |
864 | {} |
865 | }; |
866 | |
867 | /* |
868 | * Table taken from Bspec 49255 |
869 | * Pipes do have some preferred DBuf slice affinity, |
870 | * plus there are some hardcoded requirements on how |
871 | * those should be distributed for multipipe scenarios. |
872 | * For more DBuf slices algorithm can get even more messy |
873 | * and less readable, so decided to use a table almost |
874 | * as is from BSpec itself - that way it is at least easier |
875 | * to compare, change and check. |
876 | */ |
877 | static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = |
878 | /* Autogenerated with igt/tools/intel_dbuf_map tool: */ |
879 | { |
880 | { |
881 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)), |
882 | .dbuf_mask = { |
883 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)), |
884 | }, |
885 | }, |
886 | { |
887 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)), |
888 | .dbuf_mask = { |
889 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)), |
890 | }, |
891 | }, |
892 | { |
893 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)), |
894 | .dbuf_mask = { |
895 | [PIPE_A] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
896 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
897 | }, |
898 | }, |
899 | { |
900 | .active_pipes = BIT(PIPE_C)(1UL << (PIPE_C)), |
901 | .dbuf_mask = { |
902 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)) | BIT(DBUF_S1)(1UL << (DBUF_S1)), |
903 | }, |
904 | }, |
905 | { |
906 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_C)(1UL << (PIPE_C)), |
907 | .dbuf_mask = { |
908 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
909 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
910 | }, |
911 | }, |
912 | { |
913 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)), |
914 | .dbuf_mask = { |
915 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
916 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
917 | }, |
918 | }, |
919 | { |
920 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)), |
921 | .dbuf_mask = { |
922 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
923 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
924 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
925 | }, |
926 | }, |
927 | { |
928 | .active_pipes = BIT(PIPE_D)(1UL << (PIPE_D)), |
929 | .dbuf_mask = { |
930 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)) | BIT(DBUF_S1)(1UL << (DBUF_S1)), |
931 | }, |
932 | }, |
933 | { |
934 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_D)(1UL << (PIPE_D)), |
935 | .dbuf_mask = { |
936 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
937 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
938 | }, |
939 | }, |
940 | { |
941 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_D)(1UL << (PIPE_D)), |
942 | .dbuf_mask = { |
943 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
944 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
945 | }, |
946 | }, |
947 | { |
948 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_D)(1UL << (PIPE_D)), |
949 | .dbuf_mask = { |
950 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
951 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
952 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
953 | }, |
954 | }, |
955 | { |
956 | .active_pipes = BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)), |
957 | .dbuf_mask = { |
958 | [PIPE_C] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
959 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
960 | }, |
961 | }, |
962 | { |
963 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)), |
964 | .dbuf_mask = { |
965 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
966 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
967 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
968 | }, |
969 | }, |
970 | { |
971 | .active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)), |
972 | .dbuf_mask = { |
973 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
974 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
975 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
976 | }, |
977 | }, |
978 | { |
979 | .active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)), |
980 | .dbuf_mask = { |
981 | [PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
982 | [PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)), |
983 | [PIPE_C] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
984 | [PIPE_D] = BIT(DBUF_S2)(1UL << (DBUF_S2)), |
985 | }, |
986 | }, |
987 | {} |
988 | }; |
989 | |
/*
 * DBuf slice assignment table for DG2 (four slices, DBUF_S1-S4).
 * Same format as icl_allowed_dbufs/tgl_allowed_dbufs; terminated by an
 * empty sentinel entry.
 */
static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{
		.active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)),
			[PIPE_B] = BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{
		.active_pipes = BIT(PIPE_C)(1UL << (PIPE_C)),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_C)(1UL << (PIPE_C)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)),
			[PIPE_B] = BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_D] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_D] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_D] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)),
			[PIPE_B] = BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_D] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)),
			[PIPE_D] = BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)),
			[PIPE_D] = BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)),
			[PIPE_D] = BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)),
			[PIPE_B] = BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)),
			[PIPE_D] = BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{}
};
1100 | |
/*
 * DBuf slice assignment table for display version 13+ (ADL-P style,
 * four slices, DBUF_S1-S4), including join_mbus variants.  Same format
 * as the other *_allowed_dbufs tables; terminated by an empty sentinel
 * entry.
 */
static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
	/*
	 * Keep the join_mbus cases first so check_mbus_joined()
	 * will prefer them over the !join_mbus cases.
	 */
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)) | BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
		.join_mbus = true1,
	},
	{
		.active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)) | BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
		.join_mbus = true1,
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
		.join_mbus = false0,
	},
	{
		.active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
		.join_mbus = false0,
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_B] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_C)(1UL << (PIPE_C)),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_C)(1UL << (PIPE_C)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_B] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
		},
	},
	{
		.active_pipes = BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_D] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_D] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{
		.active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
			[PIPE_D] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_B] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
			[PIPE_D] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{
		.active_pipes = BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
			[PIPE_D] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
			[PIPE_D] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{
		.active_pipes = BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_B] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
			[PIPE_D] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{
		.active_pipes = BIT(PIPE_A)(1UL << (PIPE_A)) | BIT(PIPE_B)(1UL << (PIPE_B)) | BIT(PIPE_C)(1UL << (PIPE_C)) | BIT(PIPE_D)(1UL << (PIPE_D)),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
			[PIPE_B] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
			[PIPE_C] = BIT(DBUF_S3)(1UL << (DBUF_S3)) | BIT(DBUF_S4)(1UL << (DBUF_S4)),
			[PIPE_D] = BIT(DBUF_S1)(1UL << (DBUF_S1)) | BIT(DBUF_S2)(1UL << (DBUF_S2)),
		},
	},
	{}

};
1232 | |
/*
 * Look up @active_pipes in a *_allowed_dbufs table and return the
 * join_mbus flag of the first matching entry (table order matters:
 * join_mbus rows are listed first on purpose).  Returns false if no
 * entry matches.
 */
static bool_Bool check_mbus_joined(u8 active_pipes,
			      const struct dbuf_slice_conf_entry *dbuf_slices)
{
	int i;

	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
		if (dbuf_slices[i].active_pipes == active_pipes)
			return dbuf_slices[i].join_mbus;
	}
	return false0;
}
1244 | |
/* Should MBUS be joined for this set of active pipes on ADL-P? */
static bool_Bool adlp_check_mbus_joined(u8 active_pipes)
{
	return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
}
1249 | |
/*
 * Find the table entry matching both @active_pipes and @join_mbus and
 * return its dbuf slice mask for @pipe.  Returns 0 if no entry matches
 * (i.e. an unsupported pipe configuration).
 */
static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool_Bool join_mbus,
			      const struct dbuf_slice_conf_entry *dbuf_slices)
{
	int i;

	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
		if (dbuf_slices[i].active_pipes == active_pipes &&
		    dbuf_slices[i].join_mbus == join_mbus)
			return dbuf_slices[i].dbuf_mask[pipe];
	}
	return 0;
}
1262 | |
1263 | /* |
1264 | * This function finds an entry with same enabled pipe configuration and |
1265 | * returns correspondent DBuf slice mask as stated in BSpec for particular |
1266 | * platform. |
1267 | */ |
1268 | static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool_Bool join_mbus) |
1269 | { |
1270 | /* |
1271 | * FIXME: For ICL this is still a bit unclear as prev BSpec revision |
1272 | * required calculating "pipe ratio" in order to determine |
1273 | * if one or two slices can be used for single pipe configurations |
1274 | * as additional constraint to the existing table. |
1275 | * However based on recent info, it should be not "pipe ratio" |
1276 | * but rather ratio between pixel_rate and cdclk with additional |
1277 | * constants, so for now we are using only table until this is |
1278 | * clarified. Also this is the reason why crtc_state param is |
1279 | * still here - we will need it once those additional constraints |
1280 | * pop up. |
1281 | */ |
1282 | return compute_dbuf_slices(pipe, active_pipes, join_mbus, |
1283 | icl_allowed_dbufs); |
1284 | } |
1285 | |
/* DBuf slice mask for @pipe on TGL (display version 12), per tgl_allowed_dbufs. */
static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool_Bool join_mbus)
{
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   tgl_allowed_dbufs);
}
1291 | |
/* DBuf slice mask for @pipe on display version 13+, per adlp_allowed_dbufs. */
static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool_Bool join_mbus)
{
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   adlp_allowed_dbufs);
}
1297 | |
/* DBuf slice mask for @pipe on DG2, per dg2_allowed_dbufs. */
static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool_Bool join_mbus)
{
	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
				   dg2_allowed_dbufs);
}
1303 | |
/*
 * Platform dispatch for the per-pipe DBuf slice mask.  DG2 is checked
 * before the generic display-version test because it also reports
 * version >= 13.  Pre-ICL platforms fall through to a single slice
 * (DBUF_S1) when the pipe is active, 0 otherwise.
 */
static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool_Bool join_mbus)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (IS_DG2(i915)IS_PLATFORM(i915, INTEL_DG2))
		return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 13)
		return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 12)
		return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 11)
		return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
	/*
	 * For anything else just return one slice yet.
	 * Should be extended for other platforms.
	 */
	return active_pipes & BIT(pipe)(1UL << (pipe)) ? BIT(DBUF_S1)(1UL << (DBUF_S1)) : 0;
}
1323 | |
/*
 * True when only the minimal WM0 watermark should be used for @plane:
 * display version 13+, an async flip is requested via the uapi state,
 * and this plane is the one doing the async flip.
 */
static bool_Bool
use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
		     struct intel_plane *plane)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(plane->base.dev);

	return DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 13 &&
	       crtc_state->uapi.async_flip &&
	       plane->async_flip;
}
1334 | |
/*
 * Sum the relative data rates of all planes on the crtc, excluding the
 * cursor.  On display version < 11 the separate Y-plane rates
 * (rel_data_rate_y) are added as well.
 */
static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );});
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev);
	enum plane_id plane_id;
	u64 data_rate = 0;

	for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else {
		/* The cursor is allocated separately (skl_cursor_allocation()). */
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->rel_data_rate[plane_id];

		if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) < 11)
			data_rate += crtc_state->rel_data_rate_y[plane_id];
	}

	return data_rate;
}
1355 | |
/*
 * Return the watermark level to program for @plane_id. When the pipe is
 * using the SAGV watermark, level 0 is substituted by the SAGV wm0;
 * all other levels come from the regular wm array.
 */
static const struct skl_wm_level *
skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
		   enum plane_id plane_id,
		   int level)
{
	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

	if (level == 0 && pipe_wm->use_sagv_wm)
		return &wm->sagv.wm0;

	return &wm->wm[level];
}
1368 | |
/*
 * Return the transition watermark to program for @plane_id, selecting
 * the SAGV transition wm when the pipe is using SAGV watermarks.
 */
static const struct skl_wm_level *
skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
		   enum plane_id plane_id)
{
	const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

	if (pipe_wm->use_sagv_wm)
		return &wm->sagv.trans_wm;

	return &wm->trans_wm;
}
1380 | |
1381 | /* |
1382 | * We only disable the watermarks for each plane if |
1383 | * they exceed the ddb allocation of said plane. This |
1384 | * is done so that we don't end up touching cursor |
1385 | * watermarks needlessly when some other plane reduces |
1386 | * our max possible watermark level. |
1387 | * |
1388 | * Bspec has this to say about the PLANE_WM enable bit: |
1389 | * "All the watermarks at this level for all enabled |
1390 | * planes must be enabled before the level will be used." |
1391 | * So this is actually safe to do. |
1392 | */ |
/* Zero (disable) @wm entirely if its minimum DDB need exceeds @ddb's size. */
static void
skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
{
	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
		memset(wm, 0, sizeof(*wm))__builtin_memset((wm), (0), (sizeof(*wm)));
}
1399 | |
/*
 * Planar (NV12-style) variant of skl_check_wm_level(): the Y-surface wm
 * is checked against @ddb_y and the UV wm against @ddb; if either does
 * not fit, both are disabled together since the plane needs both surfaces.
 */
static void
skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
			const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
{
	if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
	    uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
		memset(wm, 0, sizeof(*wm))__builtin_memset((wm), (0), (sizeof(*wm)));
		memset(uv_wm, 0, sizeof(*uv_wm))__builtin_memset((uv_wm), (0), (sizeof(*uv_wm)));
	}
}
1410 | |
/*
 * Report whether the WM1 underrun workaround applies: all planes on
 * display version 11 (ICL/EHL), and only the cursor plane on versions
 * 12-13 (TGL/ADL). Used to copy WM0 into a disabled WM1 slot.
 */
static bool_Bool icl_need_wm1_wa(struct drm_i915_privateinteldrm_softc *i915,
			    enum plane_id plane_id)
{
	/*
	 * Wa_1408961008:icl, ehl
	 * Wa_14012656716:tgl, adl
	 * Underruns with WM1+ disabled
	 */
	return DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 11 ||
	       (IS_DISPLAY_VER(i915, 12, 13)(((&(i915)->__runtime)->display.ip.ver) >= (12) && ((&(i915)->__runtime)->display.ip.ver) <= (13)) && plane_id == PLANE_CURSOR);
}
1422 | |
/*
 * Running state while carving a pipe's DDB allocation up among planes:
 * the remaining un-apportioned data rate, the next free block offset,
 * and the number of leftover blocks still available as "extra".
 */
struct skl_plane_ddb_iter {
	u64 data_rate;
	u16 start, size;
};
1427 | |
/*
 * Hand one plane its DDB slice: the minimum blocks its watermark needs
 * plus a share of the leftover blocks proportional to @data_rate.
 * Consumes @iter's extra blocks and data-rate budget, and advances
 * iter->start past the entry written into @ddb.
 */
static void
skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
		       struct skl_ddb_entry *ddb,
		       const struct skl_wm_level *wm,
		       u64 data_rate)
{
	u16 size, extra = 0;

	if (data_rate) {
		/* proportional share of leftovers, capped at what's left */
		extra = min_t(u16, iter->size,({ u16 __min_a = (iter->size); u16 __min_b = (({ uint64_t _t = (iter->data_rate); div64_u64((iter->size * data_rate ) + _t - 1, _t); })); __min_a < __min_b ? __min_a : __min_b ; })
			      DIV64_U64_ROUND_UP(iter->size * data_rate,({ u16 __min_a = (iter->size); u16 __min_b = (({ uint64_t _t = (iter->data_rate); div64_u64((iter->size * data_rate ) + _t - 1, _t); })); __min_a < __min_b ? __min_a : __min_b ; })
						 iter->data_rate))({ u16 __min_a = (iter->size); u16 __min_b = (({ uint64_t _t = (iter->data_rate); div64_u64((iter->size * data_rate ) + _t - 1, _t); })); __min_a < __min_b ? __min_a : __min_b ; });
		iter->size -= extra;
		iter->data_rate -= data_rate;
	}

	/*
	 * Keep ddb entry of all disabled planes explicitly zeroed
	 * to avoid skl_ddb_add_affected_planes() adding them to
	 * the state when other planes change their allocations.
	 */
	size = wm->min_ddb_alloc + extra;
	if (size)
		iter->start = skl_ddb_entry_init(ddb, iter->start,
						 iter->start + size);
}
1454 | |
/*
 * Divide the CRTC's DDB allocation among its planes.
 *
 * Phases:
 *  1. reserve a fixed block of DDB for the cursor at the top of the range;
 *  2. find the highest watermark level whose combined per-plane minimum
 *     block requirement fits in the remaining space;
 *  3. grant each plane its minimum at that level plus a data-rate
 *     proportional share of the leftovers (pre-ICL planar planes get
 *     separate Y and UV allocations);
 *  4. disable watermark levels, transition and SAGV watermarks that turn
 *     out not to fit in each plane's final allocation, applying the
 *     ICL/TGL WM1 underrun workaround where needed.
 *
 * Returns 0 on success or -EINVAL if not even level 0 fits.
 */
static int
skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_dbuf_state *dbuf_state =
		intel_atomic_get_new_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_new_global_obj_state(state, & to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );});
	const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
	int num_active = hweight8(dbuf_state->active_pipes);
	struct skl_plane_ddb_iter iter;
	enum plane_id plane_id;
	u16 cursor_size;
	u32 blocks;
	int level;

	/* Clear the partitioning for disabled planes. */
	memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb))__builtin_memset((crtc_state->wm.skl.plane_ddb), (0), (sizeof (crtc_state->wm.skl.plane_ddb)));
	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y))__builtin_memset((crtc_state->wm.skl.plane_ddb_y), (0), (sizeof (crtc_state->wm.skl.plane_ddb_y)));

	if (!crtc_state->hw.active)
		return 0;

	iter.start = alloc->start;
	iter.size = skl_ddb_entry_size(alloc);
	if (iter.size == 0)
		return 0;

	/* Allocate fixed number of blocks for cursor. */
	cursor_size = skl_cursor_allocation(crtc_state, num_active);
	iter.size -= cursor_size;
	skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
			   alloc->end - cursor_size, alloc->end);

	iter.data_rate = skl_total_relative_data_rate(crtc_state);

	/*
	 * Find the highest watermark level for which we can satisfy the block
	 * requirement of active planes.
	 */
	for (level = ilk_wm_max_level(i915); level >= 0; level--) {
		blocks = 0;
		for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else {
			const struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			if (plane_id == PLANE_CURSOR) {
				const struct skl_ddb_entry *ddb =
					&crtc_state->wm.skl.plane_ddb[plane_id];

				/* cursor allocation is fixed; a level it can't
				 * reach disqualifies the whole level */
				if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
					drm_WARN_ON(&i915->drm,({ int __ret = !!((wm->wm[level].min_ddb_alloc != 0xffff)) ; if (__ret) printf("%s %s: " "%s", dev_driver_string(((& i915->drm))->dev), "", "drm_WARN_ON(" "wm->wm[level].min_ddb_alloc != 0xffff" ")"); __builtin_expect(!!(__ret), 0); })
						    wm->wm[level].min_ddb_alloc != U16_MAX)({ int __ret = !!((wm->wm[level].min_ddb_alloc != 0xffff)) ; if (__ret) printf("%s %s: " "%s", dev_driver_string(((& i915->drm))->dev), "", "drm_WARN_ON(" "wm->wm[level].min_ddb_alloc != 0xffff" ")"); __builtin_expect(!!(__ret), 0); });
					blocks = U32_MAX0xffffffffU;
					break;
				}
				continue;
			}

			blocks += wm->wm[level].min_ddb_alloc;
			blocks += wm->uv_wm[level].min_ddb_alloc;
		}

		if (blocks <= iter.size) {
			iter.size -= blocks;
			break;
		}
	}

	if (level < 0) {
		drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Requested display configuration exceeds system DDB limitations" )
			    "Requested display configuration exceeds system DDB limitations")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Requested display configuration exceeds system DDB limitations" );
		drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "minimum required %d/%d\n" , blocks, iter.size)
			    blocks, iter.size)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "minimum required %d/%d\n" , blocks, iter.size);
		return -EINVAL22;
	}

	/* avoid the WARN later when we don't allocate any extra DDB */
	if (iter.data_rate == 0)
		iter.size = 0;

	/*
	 * Grant each plane the blocks it requires at the highest achievable
	 * watermark level, plus an extra share of the leftover blocks
	 * proportional to its relative data rate.
	 */
	for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else {
		struct skl_ddb_entry *ddb =
			&crtc_state->wm.skl.plane_ddb[plane_id];
		struct skl_ddb_entry *ddb_y =
			&crtc_state->wm.skl.plane_ddb_y[plane_id];
		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		if (plane_id == PLANE_CURSOR)
			continue;

		/* pre-ICL planar planes need separate Y and UV allocations */
		if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) < 11 &&
		    crtc_state->nv12_planes & BIT(plane_id)(1UL << (plane_id))) {
			skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
					       crtc_state->rel_data_rate_y[plane_id]);
			skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
					       crtc_state->rel_data_rate[plane_id]);
		} else {
			skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
					       crtc_state->rel_data_rate[plane_id]);
		}
	}
	drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0)({ int __ret = !!((iter.size != 0 || iter.data_rate != 0)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&i915 ->drm))->dev), "", "drm_WARN_ON(" "iter.size != 0 || iter.data_rate != 0" ")"); __builtin_expect(!!(__ret), 0); });

	/*
	 * When we calculated watermark values we didn't know how high
	 * of a level we'd actually be able to hit, so we just marked
	 * all levels as "enabled."  Go back now and disable the ones
	 * that aren't actually possible.
	 */
	for (level++; level <= ilk_wm_max_level(i915); level++) {
		for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else {
			const struct skl_ddb_entry *ddb =
				&crtc_state->wm.skl.plane_ddb[plane_id];
			const struct skl_ddb_entry *ddb_y =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];
			struct skl_plane_wm *wm =
				&crtc_state->wm.skl.optimal.planes[plane_id];

			if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) < 11 &&
			    crtc_state->nv12_planes & BIT(plane_id)(1UL << (plane_id)))
				skl_check_nv12_wm_level(&wm->wm[level],
							&wm->uv_wm[level],
							ddb_y, ddb);
			else
				skl_check_wm_level(&wm->wm[level], ddb);

			/* WM1 underrun WA: keep WM1 programmed with WM0 values */
			if (icl_need_wm1_wa(i915, plane_id) &&
			    level == 1 && !wm->wm[level].enable &&
			    wm->wm[0].enable) {
				wm->wm[level].blocks = wm->wm[0].blocks;
				wm->wm[level].lines = wm->wm[0].lines;
				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
			}
		}
	}

	/*
	 * Go back and disable the transition and SAGV watermarks
	 * if it turns out we don't have enough DDB blocks for them.
	 */
	for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else {
		const struct skl_ddb_entry *ddb =
			&crtc_state->wm.skl.plane_ddb[plane_id];
		const struct skl_ddb_entry *ddb_y =
			&crtc_state->wm.skl.plane_ddb_y[plane_id];
		struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];

		if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) < 11 &&
		    crtc_state->nv12_planes & BIT(plane_id)(1UL << (plane_id))) {
			skl_check_wm_level(&wm->trans_wm, ddb_y);
		} else {
			WARN_ON(skl_ddb_entry_size(ddb_y))({ int __ret = !!(skl_ddb_entry_size(ddb_y)); if (__ret) printf ("WARNING %s failed at %s:%d\n", "skl_ddb_entry_size(ddb_y)", "/usr/src/sys/dev/pci/drm/i915/display/skl_watermark.c", 1615 ); __builtin_expect(!!(__ret), 0); });

			skl_check_wm_level(&wm->trans_wm, ddb);
		}

		skl_check_wm_level(&wm->sagv.wm0, ddb);
		skl_check_wm_level(&wm->sagv.trans_wm, ddb);
	}

	return 0;
}
1626 | |
1627 | /* |
1628 | * The max latency should be 257 (max the punit can code is 255 and we add 2us |
1629 | * for the read latency) and cpp should always be <= 8, so that |
1630 | * should allow pixel_rate up to ~2 GHz which seems sufficient since max |
1631 | * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. |
1632 | */ |
/*
 * Watermark "method 1": blocks = latency(us) * pixel rate * cpp /
 * (1000 * dbuf block size), as 16.16 fixed point. GLK+ adds one extra
 * block. Zero latency means the level is unusable; return the fixed-point
 * maximum so callers reject it.
 */
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_privateinteldrm_softc *i915, u32 pixel_rate,
	       u8 cpp, u32 latency, u32 dbuf_block_size)
{
	u32 wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX((uint_fixed_16_16_t){ .val = 0xffffffffU });

	wm_intermediate_val = latency * pixel_rate * cpp;
	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);

	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 10)
		ret = add_fixed16_u32(ret, 1);

	return ret;
}
1651 | |
/*
 * Watermark "method 2": lines covered by the latency, rounded up, times
 * the plane's blocks per line, as 16.16 fixed point. Zero latency again
 * yields the fixed-point maximum (level unusable).
 */
static uint_fixed_16_16_t
skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
	       uint_fixed_16_16_t plane_blocks_per_line)
{
	u32 wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX((uint_fixed_16_16_t){ .val = 0xffffffffU });

	wm_intermediate_val = latency * pixel_rate;
	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,(((wm_intermediate_val) + ((pipe_htotal * 1000) - 1)) / (pipe_htotal * 1000))
					   pipe_htotal * 1000)(((wm_intermediate_val) + ((pipe_htotal * 1000) - 1)) / (pipe_htotal * 1000));
	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
	return ret;
}
1668 | |
/*
 * Compute the scanout line time in microseconds (16.16 fixed point):
 * htotal * 1000 / pixel_rate(kHz). Returns 0 for an inactive CRTC or
 * (with a WARN) a zero pixel rate.
 */
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 pixel_rate;
	u32 crtc_htotal;
	uint_fixed_16_16_t linetime_us;

	if (!crtc_state->hw.active)
		return u32_to_fixed16(0);

	pixel_rate = crtc_state->pixel_rate;

	if (drm_WARN_ON(&i915->drm, pixel_rate == 0)({ int __ret = !!((pixel_rate == 0)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&i915->drm))->dev), "", "drm_WARN_ON(" "pixel_rate == 0" ")"); __builtin_expect(!!(__ret), 0); }))
		return u32_to_fixed16(0);

	crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);

	return linetime_us;
}
1690 | |
/*
 * Pre-compute the per-plane parameters consumed by the watermark math:
 * tiling classification, plane width/cpp/pixel rate, dbuf block size,
 * Y-tile minimum scanlines, bytes and blocks per line, and line time.
 * @color_plane is 0 for the main surface, 1 for the UV surface of a
 * planar format (rejected with -EINVAL for non-planar formats).
 * Returns 0 on success, negative errno on invalid input.
 */
static int
skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
		      int width, const struct drm_format_info *format,
		      u64 modifier, unsigned int rotation,
		      u32 plane_pixel_rate, struct skl_wm_params *wp,
		      int color_plane)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char * )__mptr - __builtin_offsetof(struct intel_crtc, base) );});
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev);
	u32 interm_pbpl;

	/* only planar format has two planes */
	if (color_plane == 1 &&
	    !intel_format_info_is_yuv_semiplanar(format, modifier)) {
		drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Non planar format have single plane\n" )
			    "Non planar format have single plane\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "Non planar format have single plane\n" );
		return -EINVAL22;
	}

	/* classify the modifier: all Y-major/4-tile variants count as y_tiled */
	wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED((((__u64)0x01) << 56) | ((2) & 0x00ffffffffffffffULL )) ||
		      modifier == I915_FORMAT_MOD_4_TILED((((__u64)0x01) << 56) | ((9) & 0x00ffffffffffffffULL )) ||
		      modifier == I915_FORMAT_MOD_Yf_TILED((((__u64)0x01) << 56) | ((3) & 0x00ffffffffffffffULL )) ||
		      modifier == I915_FORMAT_MOD_Y_TILED_CCS((((__u64)0x01) << 56) | ((4) & 0x00ffffffffffffffULL )) ||
		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS((((__u64)0x01) << 56) | ((5) & 0x00ffffffffffffffULL )) ||
		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS((((__u64)0x01) << 56) | ((6) & 0x00ffffffffffffffULL )) ||
		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS((((__u64)0x01) << 56) | ((7) & 0x00ffffffffffffffULL )) ||
		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC((((__u64)0x01) << 56) | ((8) & 0x00ffffffffffffffULL )) ||
		      modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS((((__u64)0x01) << 56) | ((10) & 0x00ffffffffffffffULL )) ||
		      modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS((((__u64)0x01) << 56) | ((11) & 0x00ffffffffffffffULL )) ||
		      modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC((((__u64)0x01) << 56) | ((12) & 0x00ffffffffffffffULL ));
	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED((((__u64)0x01) << 56) | ((1) & 0x00ffffffffffffffULL ));
	/* render-compressed surfaces need the WA block bump on GEN9 */
	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS((((__u64)0x01) << 56) | ((4) & 0x00ffffffffffffffULL )) ||
			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS((((__u64)0x01) << 56) | ((5) & 0x00ffffffffffffffULL )) ||
			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS((((__u64)0x01) << 56) | ((6) & 0x00ffffffffffffffULL )) ||
			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS((((__u64)0x01) << 56) | ((7) & 0x00ffffffffffffffULL )) ||
			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC((((__u64)0x01) << 56) | ((8) & 0x00ffffffffffffffULL )) ||
			 modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS((((__u64)0x01) << 56) | ((10) & 0x00ffffffffffffffULL )) ||
			 modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS((((__u64)0x01) << 56) | ((11) & 0x00ffffffffffffffULL )) ||
			 modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC((((__u64)0x01) << 56) | ((12) & 0x00ffffffffffffffULL ));
	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);

	wp->width = width;
	/* UV surface of a planar format is half width */
	if (color_plane == 1 && wp->is_planar)
		wp->width /= 2;

	wp->cpp = format->cpp[color_plane];
	wp->plane_pixel_rate = plane_pixel_rate;

	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11 &&
	    modifier == I915_FORMAT_MOD_Yf_TILED((((__u64)0x01) << 56) | ((3) & 0x00ffffffffffffffULL )) && wp->cpp == 1)
		wp->dbuf_block_size = 256;
	else
		wp->dbuf_block_size = 512;

	if (drm_rotation_90_or_270(rotation)) {
		switch (wp->cpp) {
		case 1:
			wp->y_min_scanlines = 16;
			break;
		case 2:
			wp->y_min_scanlines = 8;
			break;
		case 4:
			wp->y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(wp->cpp)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "wp->cpp", (long)(wp->cpp)); __builtin_expect(!!(__ret ), 0); });
			return -EINVAL22;
		}
	} else {
		wp->y_min_scanlines = 4;
	}

	if (skl_needs_memory_bw_wa(i915))
		wp->y_min_scanlines *= 2;

	wp->plane_bytes_per_line = wp->width * wp->cpp;
	if (wp->y_tiled) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *(((wp->plane_bytes_per_line * wp->y_min_scanlines) + (( wp->dbuf_block_size) - 1)) / (wp->dbuf_block_size))
					   wp->y_min_scanlines,(((wp->plane_bytes_per_line * wp->y_min_scanlines) + (( wp->dbuf_block_size) - 1)) / (wp->dbuf_block_size))
					   wp->dbuf_block_size)(((wp->plane_bytes_per_line * wp->y_min_scanlines) + (( wp->dbuf_block_size) - 1)) / (wp->dbuf_block_size));

		if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
							wp->y_min_scanlines);
	} else {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,(((wp->plane_bytes_per_line) + ((wp->dbuf_block_size) - 1)) / (wp->dbuf_block_size))
					   wp->dbuf_block_size)(((wp->plane_bytes_per_line) + ((wp->dbuf_block_size) - 1)) / (wp->dbuf_block_size));

		if (!wp->x_tiled || DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	}

	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
					     wp->plane_blocks_per_line);

	wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));

	return 0;
}
1795 | |
/*
 * Thin wrapper around skl_compute_wm_params() that extracts width,
 * format, modifier, rotation and pixel rate from @plane_state.
 * Returns 0 on success, negative errno on invalid input.
 */
static int
skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state,
			    struct skl_wm_params *wp, int color_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int width;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	width = drm_rect_width(&plane_state->uapi.src) >> 16;

	return skl_compute_wm_params(crtc_state, width,
				     fb->format, fb->modifier,
				     plane_state->hw.rotation,
				     intel_plane_pixel_rate(crtc_state, plane_state),
				     wp, color_plane);
}
1817 | |
/*
 * Whether the line count is meaningful for @level: always on GLK+,
 * and only for levels above 0 on older (GEN9) hardware.
 */
static bool_Bool skl_wm_has_lines(struct drm_i915_privateinteldrm_softc *i915, int level)
{
	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 10)
		return true1;

	/* The number of lines are ignored for the level 0 watermark. */
	return level > 0;
}
1826 | |
/* Maximum programmable watermark line count: 255 on ver 13+, else 31. */
static int skl_wm_max_lines(struct drm_i915_privateinteldrm_softc *i915)
{
	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 13)
		return 255;
	else
		return 31;
}
1834 | |
/*
 * Compute one watermark level for a plane into @result.
 *
 * Picks between the latency-based "method 1" and line-based "method 2"
 * estimates per Bspec rules, applies per-platform latency and block
 * workarounds, derives the line count and minimum DDB allocation, and
 * marks the level enabled. A level is rejected (min_ddb_alloc = U16_MAX)
 * for zero latency, for levels > 0 under the minimal-WM0 async-flip
 * restriction, or when the line count exceeds the hardware maximum.
 * @result_prev is the previously computed (lower) level, used for the
 * GEN9 monotonic-blocks rule.
 */
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
				 struct intel_plane *plane,
				 int level,
				 unsigned int latency,
				 const struct skl_wm_params *wp,
				 const struct skl_wm_level *result_prev,
				 struct skl_wm_level *result /* out */)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc_state->uapi.crtc->dev);
	uint_fixed_16_16_t method1, method2;
	uint_fixed_16_16_t selected_result;
	u32 blocks, lines, min_ddb_alloc = 0;

	if (latency == 0 ||
	    (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX0xffff;
		return;
	}

	/*
	 * WaIncreaseLatencyIPCEnabled: kbl,cfl
	 * Display WA #1141: kbl,cfl
	 */
	if ((IS_KABYLAKE(i915)IS_PLATFORM(i915, INTEL_KABYLAKE) || IS_COFFEELAKE(i915)IS_PLATFORM(i915, INTEL_COFFEELAKE) || IS_COMETLAKE(i915)IS_PLATFORM(i915, INTEL_COMETLAKE)) &&
	    skl_watermark_ipc_enabled(i915))
		latency += 4;

	if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
		latency += 15;

	method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
				 wp->cpp, latency, wp->dbuf_block_size);
	method2 = skl_wm_method2(wp->plane_pixel_rate,
				 crtc_state->hw.pipe_mode.crtc_htotal,
				 latency,
				 wp->plane_blocks_per_line);

	if (wp->y_tiled) {
		selected_result = max_fixed16(method2, wp->y_tile_minimum);
	} else {
		if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
		     wp->dbuf_block_size < 1) &&
		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
			selected_result = method2;
		} else if (latency >= wp->linetime_us) {
			if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 9)
				selected_result = min_fixed16(method1, method2);
			else
				selected_result = method2;
		} else {
			selected_result = method1;
		}
	}

	blocks = fixed16_to_u32_round_up(selected_result) + 1;
	/*
	 * Lets have blocks at minimum equivalent to plane_blocks_per_line
	 * as there will be at minimum one line for lines configuration. This
	 * is a work around for FIFO underruns observed with resolutions like
	 * 4k 60 Hz in single channel DRAM configurations.
	 *
	 * As per the Bspec 49325, if the ddb allocation can hold at least
	 * one plane_blocks_per_line, we should have selected method2 in
	 * the above logic. Assuming that modern versions have enough dbuf
	 * and method2 guarantees blocks equivalent to at least 1 line,
	 * select the blocks as plane_blocks_per_line.
	 *
	 * TODO: Revisit the logic when we have better understanding on DRAM
	 * channels' impact on the level 0 memory latency and the relevant
	 * wm calculations.
	 */
	if (skl_wm_has_lines(i915, level))
		blocks = max(blocks,(((blocks)>(fixed16_to_u32_round_up(wp->plane_blocks_per_line )))?(blocks):(fixed16_to_u32_round_up(wp->plane_blocks_per_line )))
			     fixed16_to_u32_round_up(wp->plane_blocks_per_line))(((blocks)>(fixed16_to_u32_round_up(wp->plane_blocks_per_line )))?(blocks):(fixed16_to_u32_round_up(wp->plane_blocks_per_line )));
	lines = div_round_up_fixed16(selected_result,
				     wp->plane_blocks_per_line);

	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 9) {
		/* Display WA #1125: skl,bxt,kbl */
		if (level == 0 && wp->rc_surface)
			blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);

		/* Display WA #1126: skl,bxt,kbl */
		if (level >= 1 && level <= 7) {
			if (wp->y_tiled) {
				blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
				lines += wp->y_min_scanlines;
			} else {
				blocks++;
			}

			/*
			 * Make sure result blocks for higher latency levels are
			 * at least as high as level below the current level.
			 * Assumption in DDB algorithm optimization for special
			 * cases. Also covers Display WA #1125 for RC.
			 */
			if (result_prev->blocks > blocks)
				blocks = result_prev->blocks;
		}
	}

	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11) {
		if (wp->y_tiled) {
			int extra_lines;

			/* round the line count up to the next y-tile boundary */
			if (lines % wp->y_min_scanlines == 0)
				extra_lines = wp->y_min_scanlines;
			else
				extra_lines = wp->y_min_scanlines * 2 -
					lines % wp->y_min_scanlines;

			min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
								 wp->plane_blocks_per_line);
		} else {
			/* 10% headroom on top of the block estimate */
			min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10)(((blocks) + ((10) - 1)) / (10));
		}
	}

	if (!skl_wm_has_lines(i915, level))
		lines = 0;

	if (lines > skl_wm_max_lines(i915)) {
		/* reject it */
		result->min_ddb_alloc = U16_MAX0xffff;
		return;
	}

	/*
	 * If lines is valid, assume we can use this watermark level
	 * for now.  We'll come back and disable it after we calculate the
	 * DDB allocation if it turns out we don't actually have enough
	 * blocks to satisfy it.
	 */
	result->blocks = blocks;
	result->lines = lines;
	/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
	result->min_ddb_alloc = max(min_ddb_alloc, blocks)(((min_ddb_alloc)>(blocks))?(min_ddb_alloc):(blocks)) + 1;
	result->enable = true1;

	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) < 12 && i915->display.sagv.block_time_us)
		result->can_sagv = latency >= i915->display.sagv.block_time_us;
}
1979 | |
/*
 * Fill @levels[0..max] by running skl_compute_plane_wm() for every
 * latency level, feeding each result back in as the previous level so
 * the GEN9 monotonicity rule can be applied.
 */
static void
skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
		      struct intel_plane *plane,
		      const struct skl_wm_params *wm_params,
		      struct skl_wm_level *levels)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc_state->uapi.crtc->dev);
	int level, max_level = ilk_wm_max_level(i915);
	struct skl_wm_level *result_prev = &levels[0];

	for (level = 0; level <= max_level; level++) {
		struct skl_wm_level *result = &levels[level];
		unsigned int latency = i915->display.wm.skl_latency[level];

		skl_compute_plane_wm(crtc_state, plane, level, latency,
				     wm_params, result_prev, result);

		result_prev = result;
	}
}
2000 | |
/*
 * Compute the TGL+ SAGV level-0 watermark: a regular level-0 watermark
 * calculated with the level-0 latency inflated by the SAGV block time
 * (or zero latency, i.e. rejected, if no SAGV block time is known).
 */
static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
				struct intel_plane *plane,
				const struct skl_wm_params *wm_params,
				struct skl_plane_wm *plane_wm)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
	struct skl_wm_level *levels = plane_wm->wm;
	unsigned int latency = 0;

	if (i915->display.sagv.block_time_us)
		latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];

	skl_compute_plane_wm(crtc_state, plane, 0, latency,
			     wm_params, &levels[0],
			     sagv_wm);
}
2018 | |
/*
 * Compute the transition watermark for one plane from its level-0
 * watermark (@wm0) and the plane WM parameters (@wp). Leaves @trans_wm
 * untouched (i.e. disabled) when IPC is off or on GEN9 hardware.
 */
static void skl_compute_transition_wm(struct drm_i915_privateinteldrm_softc *i915,
				      struct skl_wm_level *trans_wm,
				      const struct skl_wm_level *wm0,
				      const struct skl_wm_params *wp)
{
	u16 trans_min, trans_amount, trans_y_tile_min;
	u16 wm0_blocks, trans_offset, blocks;

	/* Transition WM don't make any sense if ipc is disabled */
	if (!skl_watermark_ipc_enabled(i915))
		return;

	/*
	 * WaDisableTWM:skl,kbl,cfl,bxt
	 * Transition WM are not recommended by HW team for GEN9
	 */
	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 9)
		return;

	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11)
		trans_min = 4;
	else
		trans_min = 14;

	/* Display WA #1140: glk,cnl */
	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) == 10)
		trans_amount = 0;
	else
		trans_amount = 10; /* This is configurable amount */

	trans_offset = trans_min + trans_amount;

	/*
	 * The spec asks for Selected Result Blocks for wm0 (the real value),
	 * not Result Blocks (the integer value). Pay attention to the capital
	 * letters. The value wm_l0->blocks is actually Result Blocks, but
	 * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
	 * and since we later will have to get the ceiling of the sum in the
	 * transition watermarks calculation, we can just pretend Selected
	 * Result Blocks is Result Blocks minus 1 and it should work for the
	 * current platforms.
	 */
	wm0_blocks = wm0->blocks - 1;

	if (wp->y_tiled) {
		/* Y-tiled: at least two Y-tile rows worth of blocks */
		trans_y_tile_min =
			(u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
		blocks = max(wm0_blocks, trans_y_tile_min)(((wm0_blocks)>(trans_y_tile_min))?(wm0_blocks):(trans_y_tile_min)) + trans_offset;
	} else {
		blocks = wm0_blocks + trans_offset;
	}
	blocks++;

	/*
	 * Just assume we can enable the transition watermark. After
	 * computing the DDB we'll come back and disable it if that
	 * assumption turns out to be false.
	 */
	trans_wm->blocks = blocks;
	trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1)({ u16 __max_a = (wm0->min_ddb_alloc); u16 __max_b = (blocks + 1); __max_a > __max_b ? __max_a : __max_b; });
	trans_wm->enable = true1;
}
2081 | |
/*
 * Build all watermarks (regular levels, transition WM, and on gen12+
 * the SAGV WMs) for one color plane of @plane into the crtc's raw WM
 * state. Returns 0 on success or a negative error code from the
 * parameter computation.
 */
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     struct intel_plane *plane, int color_plane)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (crtc_state->uapi.crtc); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc, base) );});
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev);
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
	struct skl_wm_params wm_params;
	int ret;

	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
					  &wm_params, color_plane);
	if (ret)
		return ret;

	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);

	skl_compute_transition_wm(i915, &wm->trans_wm,
				  &wm->wm[0], &wm_params);

	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 12) {
		/* gen12+ additionally tracks dedicated SAGV watermarks */
		tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);

		skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
					  &wm->sagv.wm0, &wm_params);
	}

	return 0;
}
2111 | |
/*
 * Build the UV-plane watermark levels for a planar (e.g. NV12)
 * framebuffer; marks the plane WM state as planar. Color plane 1 is
 * the UV plane. Returns 0 or a negative error code.
 */
static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state,
				 struct intel_plane *plane)
{
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
	struct skl_wm_params wm_params;
	int ret;

	wm->is_planar = true1;

	/* uv plane watermarks must also be validated for NV12/Planar */
	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
					  &wm_params, 1);
	if (ret)
		return ret;

	skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);

	return 0;
}
2132 | |
/*
 * Pre-gen11 path: build the watermarks for one plane. The WM state is
 * zeroed first; invisible planes keep all-zero (disabled) watermarks.
 * For multi-plane YUV formats the UV watermarks are computed as well.
 */
static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane)({ const __typeof( ((struct intel_plane *)0)->base ) *__mptr = (plane_state->uapi.plane); (struct intel_plane *)( (char *)__mptr - __builtin_offsetof(struct intel_plane, base) );});
	enum plane_id plane_id = plane->id;
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;

	memset(wm, 0, sizeof(*wm))__builtin_memset((wm), (0), (sizeof(*wm)));

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	/* color plane 0 = Y (or the only plane for non-planar formats) */
	ret = skl_build_plane_wm_single(crtc_state, plane_state,
					plane, 0);
	if (ret)
		return ret;

	if (fb->format->is_yuv && fb->format->num_planes > 1) {
		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
					    plane);
		if (ret)
			return ret;
	}

	return 0;
}
2161 | |
/*
 * Gen11+ path: build watermarks for one plane, honouring the Y/UV
 * plane linking scheme. Planar slaves are skipped entirely (the master
 * computes for both); for a linked master the Y watermarks are computed
 * on the linked (slave) plane and the UV watermarks on this plane.
 */
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane)({ const __typeof( ((struct intel_plane *)0)->base ) *__mptr = (plane_state->uapi.plane); (struct intel_plane *)( (char *)__mptr - __builtin_offsetof(struct intel_plane, base) );});
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(plane->base.dev);
	enum plane_id plane_id = plane->id;
	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
	int ret;

	/* Watermarks calculated in master */
	if (plane_state->planar_slave)
		return 0;

	memset(wm, 0, sizeof(*wm))__builtin_memset((wm), (0), (sizeof(*wm)));

	if (plane_state->planar_linked_plane) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;

		/* a linked master must be visible and use a planar YUV format */
		drm_WARN_ON(&i915->drm,({ int __ret = !!((!intel_wm_plane_visible(crtc_state, plane_state))); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&i915->drm))->dev), "", "drm_WARN_ON(" "!intel_wm_plane_visible(crtc_state, plane_state)" ")"); __builtin_expect(!!(__ret), 0); })
			    !intel_wm_plane_visible(crtc_state, plane_state))({ int __ret = !!((!intel_wm_plane_visible(crtc_state, plane_state))); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&i915->drm))->dev), "", "drm_WARN_ON(" "!intel_wm_plane_visible(crtc_state, plane_state)" ")"); __builtin_expect(!!(__ret), 0); });
		drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||({ int __ret = !!((!fb->format->is_yuv || fb->format->num_planes == 1)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&i915->drm))->dev), "", "drm_WARN_ON(" "!fb->format->is_yuv || fb->format->num_planes == 1" ")"); __builtin_expect(!!(__ret), 0); })
			    fb->format->num_planes == 1)({ int __ret = !!((!fb->format->is_yuv || fb->format->num_planes == 1)); if (__ret) printf("%s %s: " "%s", dev_driver_string(((&i915->drm))->dev), "", "drm_WARN_ON(" "!fb->format->is_yuv || fb->format->num_planes == 1" ")"); __builtin_expect(!!(__ret), 0); });

		/* Y (color plane 0) watermarks live on the linked plane */
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane_state->planar_linked_plane, 0);
		if (ret)
			return ret;

		/* UV (color plane 1) watermarks live on this plane */
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane, 1);
		if (ret)
			return ret;
	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
		ret = skl_build_plane_wm_single(crtc_state, plane_state,
						plane, 0);
		if (ret)
			return ret;
	}

	return 0;
}
2203 | |
/*
 * Build the watermarks for every plane of @crtc in the atomic @state,
 * then latch the freshly computed raw WM state as the optimal state.
 * Returns 0 or the first per-plane error.
 */
static int skl_build_pipe_wm(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int ret, i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_total_plane && ((plane) = ({ const __typeof( ((struct intel_plane *)0)->base ) *__mptr = ((state)->base.planes[i].ptr); ( struct intel_plane *)( (char *)__mptr - __builtin_offsetof(struct intel_plane, base) );}), (plane_state) = ({ const __typeof( ( (struct intel_plane_state *)0)->uapi ) *__mptr = ((state)-> base.planes[i].new_state); (struct intel_plane_state *)( (char *)__mptr - __builtin_offsetof(struct intel_plane_state, uapi ) );}), 1); (i)++) if (!(plane)) {} else {
		/*
		 * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
		 * instead but we don't populate that correctly for NV12 Y
		 * planes so for now hack this.
		 */
		if (plane->pipe != crtc->pipe)
			continue;

		if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11)
			ret = icl_build_plane_wm(crtc_state, plane_state);
		else
			ret = skl_build_plane_wm(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	/* everything computed successfully: raw becomes the optimal state */
	crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;

	return 0;
}
2235 | |
/*
 * Program one DDB (display buffer) allocation entry into @reg.
 * An entry with end == 0 means "no allocation" and clears the register.
 * NOTE(review): the end - 1 suggests the hardware end field is
 * inclusive while the software entry is exclusive — confirm in Bspec.
 */
static void skl_ddb_entry_write(struct drm_i915_privateinteldrm_softc *i915,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		intel_de_write_fw(i915, reg,
				  PLANE_BUF_END(entry->end - 1)((u32)((((typeof(((u32)((((~0UL) >> (64 - (27) - 1)) & ((~0UL) << (16))) + 0))))((entry->end - 1)) << (__builtin_ffsll(((u32)((((~0UL) >> (64 - (27) - 1)) & ((~0UL) << (16))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (27) - 1)) & ((~0UL) << (16))) + 0)))) + 0 + 0 + 0 + 0)) |
				  PLANE_BUF_START(entry->start)((u32)((((typeof(((u32)((((~0UL) >> (64 - (11) - 1)) & ((~0UL) << (0))) + 0))))((entry->start)) << ( __builtin_ffsll(((u32)((((~0UL) >> (64 - (11) - 1)) & ((~0UL) << (0))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (11) - 1)) & ((~0UL) << (0))) + 0)))) + 0 + 0 + 0 + 0)));
	else
		intel_de_write_fw(i915, reg, 0);
}
2247 | |
/*
 * Encode one skl_wm_level into its register layout (enable bit,
 * ignore-lines bit, blocks and lines fields) and write it with a
 * non-locked ("fw") register write.
 */
static void skl_write_wm_level(struct drm_i915_privateinteldrm_softc *i915,
			       i915_reg_t reg,
			       const struct skl_wm_level *level)
{
	u32 val = 0;

	if (level->enable)
		val |= PLANE_WM_EN(1 << 31);
	if (level->ignore_lines)
		val |= PLANE_WM_IGNORE_LINES(1 << 30);
	val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks)((u32)((((typeof(((u32)((((~0UL) >> (64 - (11) - 1)) & ((~0UL) << (0))) + 0))))(level->blocks) << (__builtin_ffsll(((u32)((((~0UL) >> (64 - (11) - 1)) & ((~0UL) << (0))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (11 ) - 1)) & ((~0UL) << (0))) + 0)))) + 0 + 0 + 0 + 0));
	val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines)((u32)((((typeof(((u32)((((~0UL) >> (64 - (26) - 1)) & ((~0UL) << (14))) + 0))))(level->lines) << (__builtin_ffsll(((u32)((((~0UL) >> (64 - (26) - 1)) & ((~0UL) << (14))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (26 ) - 1)) & ((~0UL) << (14))) + 0)))) + 0 + 0 + 0 + 0));

	intel_de_write_fw(i915, reg, val);
}
2263 | |
/*
 * Write all watermark and DDB registers for one (non-cursor) plane:
 * every WM level, the transition WM, the SAGV WMs where the hardware
 * has them, the plane DDB allocation, and (pre-gen11) the separate
 * NV12/Y-plane DDB allocation.
 */
void skl_write_plane_wm(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(i915);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
	const struct skl_ddb_entry *ddb =
		&crtc_state->wm.skl.plane_ddb[plane_id];
	const struct skl_ddb_entry *ddb_y =
		&crtc_state->wm.skl.plane_ddb_y[plane_id];

	for (level = 0; level <= max_level; level++)
		skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level)((const i915_reg_t){ .reg = (((((0x70240) + (pipe) * ((0x71240) - (0x70240)))) + (plane_id) * ((((0x70340) + (pipe) * ((0x71340) - (0x70340)))) - (((0x70240) + (pipe) * ((0x71240) - (0x70240)))))) + ((4) * (level))) }),
				   skl_plane_wm_level(pipe_wm, plane_id, level));

	skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id)((const i915_reg_t){ .reg = (((((0x70268) + (pipe) * ((0x71268) - (0x70268)))) + (plane_id) * ((((0x70368) + (pipe) * ((0x71368) - (0x70368)))) - (((0x70268) + (pipe) * ((0x71268) - (0x70268))))))) }),
			   skl_plane_trans_wm(pipe_wm, plane_id));

	if (HAS_HW_SAGV_WM(i915)(((&(i915)->__runtime)->display.ip.ver) >= 13 && !((&(i915)->__info)->is_dgfx))) {
		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

		skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id)((const i915_reg_t){ .reg = (((((0x70258) + (pipe) * ((0x71258) - (0x70258)))) + (plane_id) * ((((0x70358) + (pipe) * ((0x71358) - (0x70358)))) - (((0x70258) + (pipe) * ((0x71258) - (0x70258))))))) }),
				   &wm->sagv.wm0);
		skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id)((const i915_reg_t){ .reg = (((((0x7025C) + (pipe) * ((0x7125C) - (0x7025C)))) + (plane_id) * ((((0x7035C) + (pipe) * ((0x7135C) - (0x7035C)))) - (((0x7025C) + (pipe) * ((0x7125C) - (0x7025C))))))) }),
				   &wm->sagv.trans_wm);
	}

	skl_ddb_entry_write(i915,
			    PLANE_BUF_CFG(pipe, plane_id)((const i915_reg_t){ .reg = (((((0x7027c) + (pipe) * ((0x7127c) - (0x7027c)))) + (plane_id) * ((((0x7037c) + (pipe) * ((0x7137c) - (0x7037c)))) - (((0x7027c) + (pipe) * ((0x7127c) - (0x7027c))))))) }), ddb);

	/* pre-gen11: the Y/UV planes have separate DDB allocations */
	if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) < 11)
		skl_ddb_entry_write(i915,
				    PLANE_NV12_BUF_CFG(pipe, plane_id)((const i915_reg_t){ .reg = (((((0x70278) + (pipe) * ((0x71278) - (0x70278)))) + (plane_id) * ((((0x70378) + (pipe) * ((0x71378) - (0x70378)))) - (((0x70278) + (pipe) * ((0x71278) - (0x70278))))))) }), ddb_y);
}
2300 | |
/*
 * Cursor-plane counterpart of skl_write_plane_wm(): write every WM
 * level, the transition WM, the SAGV WMs where present, and the cursor
 * DDB allocation register.
 */
void skl_write_cursor_wm(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(plane->base.dev);
	int level, max_level = ilk_wm_max_level(i915);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = plane->pipe;
	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
	const struct skl_ddb_entry *ddb =
		&crtc_state->wm.skl.plane_ddb[plane_id];

	for (level = 0; level <= max_level; level++)
		skl_write_wm_level(i915, CUR_WM(pipe, level)((const i915_reg_t){ .reg = (((0x70140) + (pipe) * ((0x71140) - (0x70140))) + ((4) * (level))) }),
				   skl_plane_wm_level(pipe_wm, plane_id, level));

	skl_write_wm_level(i915, CUR_WM_TRANS(pipe)((const i915_reg_t){ .reg = (((0x70168) + (pipe) * ((0x71168) - (0x70168)))) }),
			   skl_plane_trans_wm(pipe_wm, plane_id));

	if (HAS_HW_SAGV_WM(i915)(((&(i915)->__runtime)->display.ip.ver) >= 13 && !((&(i915)->__info)->is_dgfx))) {
		const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];

		skl_write_wm_level(i915, CUR_WM_SAGV(pipe)((const i915_reg_t){ .reg = (((0x70158) + (pipe) * ((0x71158) - (0x70158)))) }),
				   &wm->sagv.wm0);
		skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe)((const i915_reg_t){ .reg = (((0x7015C) + (pipe) * ((0x7115C) - (0x7015C)))) }),
				   &wm->sagv.trans_wm);
	}

	skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe)((const i915_reg_t){ .reg = (((0x7017c) + (pipe) * ((0x7117c) - (0x7017c)))) }), ddb);
}
2330 | |
2331 | static bool_Bool skl_wm_level_equals(const struct skl_wm_level *l1, |
2332 | const struct skl_wm_level *l2) |
2333 | { |
2334 | return l1->enable == l2->enable && |
2335 | l1->ignore_lines == l2->ignore_lines && |
2336 | l1->lines == l2->lines && |
2337 | l1->blocks == l2->blocks; |
2338 | } |
2339 | |
2340 | static bool_Bool skl_plane_wm_equals(struct drm_i915_privateinteldrm_softc *i915, |
2341 | const struct skl_plane_wm *wm1, |
2342 | const struct skl_plane_wm *wm2) |
2343 | { |
2344 | int level, max_level = ilk_wm_max_level(i915); |
2345 | |
2346 | for (level = 0; level <= max_level; level++) { |
2347 | /* |
2348 | * We don't check uv_wm as the hardware doesn't actually |
2349 | * use it. It only gets used for calculating the required |
2350 | * ddb allocation. |
2351 | */ |
2352 | if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level])) |
2353 | return false0; |
2354 | } |
2355 | |
2356 | return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) && |
2357 | skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) && |
2358 | skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm); |
2359 | } |
2360 | |
2361 | static bool_Bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, |
2362 | const struct skl_ddb_entry *b) |
2363 | { |
2364 | return a->start < b->end && b->start < a->end; |
2365 | } |
2366 | |
2367 | static void skl_ddb_entry_union(struct skl_ddb_entry *a, |
2368 | const struct skl_ddb_entry *b) |
2369 | { |
2370 | if (a->end && b->end) { |
2371 | a->start = min(a->start, b->start)(((a->start)<(b->start))?(a->start):(b->start) ); |
2372 | a->end = max(a->end, b->end)(((a->end)>(b->end))?(a->end):(b->end)); |
2373 | } else if (b->end) { |
2374 | a->start = b->start; |
2375 | a->end = b->end; |
2376 | } |
2377 | } |
2378 | |
2379 | bool_Bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, |
2380 | const struct skl_ddb_entry *entries, |
2381 | int num_entries, int ignore_idx) |
2382 | { |
2383 | int i; |
2384 | |
2385 | for (i = 0; i < num_entries; i++) { |
2386 | if (i != ignore_idx && |
2387 | skl_ddb_entries_overlap(ddb, &entries[i])) |
2388 | return true1; |
2389 | } |
2390 | |
2391 | return false0; |
2392 | } |
2393 | |
/*
 * Pull every plane whose DDB allocation changed between the old and new
 * crtc state into the atomic commit, so its registers get rewritten.
 * Returns 0 or a PTR_ERR from acquiring a plane state.
 */
static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
			    struct intel_crtc_state *new_crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state)({ const __typeof( ((struct intel_atomic_state *)0)->base ) *__mptr = (new_crtc_state->uapi.state); (struct intel_atomic_state *)( (char *)__mptr - __builtin_offsetof(struct intel_atomic_state, base) );});
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc)({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = (new_crtc_state->uapi.crtc); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc, base) );});
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&i915->drm, crtc, plane)for (plane = ({ const __typeof( ((__typeof(*plane) *)0)->base.head ) *__mptr = ((&(&i915->drm)->mode_config.plane_list)->next); (__typeof(*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base.head) );}); &plane->base.head != (&(&i915->drm)->mode_config.plane_list); plane = ({ const __typeof( ((__typeof(*plane) *)0)->base.head ) *__mptr = (plane->base.head.next); (__typeof(*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base.head) );})) if (!((plane)->pipe == (crtc)->pipe)) {} else {
		struct intel_plane_state *plane_state;
		enum plane_id plane_id = plane->id;

		/* both the main and the Y-plane allocation must be unchanged */
		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
					&new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
			continue;

		plane_state = intel_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		new_crtc_state->update_planes |= BIT(plane_id)(1UL << (plane_id));
	}

	return 0;
}
2422 | |
/*
 * Combine the per-pipe dbuf slice masks of @dbuf_state into the set of
 * slices that must be powered up. Slice S1 is always included.
 */
static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(dbuf_state->base.state->base.dev);
	u8 enabled_slices;
	enum pipe pipe;

	/*
	 * FIXME: For now we always enable slice S1 as per
	 * the Bspec display initialization sequence.
	 */
	enabled_slices = BIT(DBUF_S1)(1UL << (DBUF_S1));

	for_each_pipe(i915, pipe)for ((pipe) = 0; (pipe) < I915_MAX_PIPES; (pipe)++) if (!((&(i915)->__runtime)->pipe_mask & (1UL << (pipe)))) {} else
		enabled_slices |= dbuf_state->slices[pipe];

	return enabled_slices;
}
2440 | |
/*
 * Atomic-check step that computes the display buffer (DDB) layout for
 * the whole commit: the new global dbuf state (active pipes, per-pipe
 * slices, MBUS joining), then the per-crtc and per-plane DDB
 * allocations, pulling in any plane whose allocation changed.
 * Does nothing when no crtc is part of the commit. Returns 0 or a
 * negative error code; global state is locked/serialized only when the
 * corresponding piece actually changed.
 */
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
	struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev);
	const struct intel_dbuf_state *old_dbuf_state;
	struct intel_dbuf_state *new_dbuf_state = NULL((void *)0);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* acquire the dbuf state once iff the commit touches any crtc */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc, base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else {
		new_dbuf_state = intel_atomic_get_dbuf_state(state);
		if (IS_ERR(new_dbuf_state))
			return PTR_ERR(new_dbuf_state);

		old_dbuf_state = intel_atomic_get_old_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) *__mptr = ((intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state, base) );});
		break;
	}

	if (!new_dbuf_state)
		return 0;

	new_dbuf_state->active_pipes =
		intel_calc_active_pipes(state, old_dbuf_state->active_pipes);

	if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;
	}

	if (HAS_MBUS_JOINING(i915)(IS_PLATFORM(i915, INTEL_ALDERLAKE_P) || ((&(i915)->__runtime)->display.ip.ver) >= 14))
		new_dbuf_state->joined_mbus =
			adlp_check_mbus_joined(new_dbuf_state->active_pipes);

	/* recompute each pipe's slice mask; lock global state on change */
	for_each_intel_crtc(&i915->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) *__mptr = ((&(&i915->drm)->mode_config.crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&i915->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) *__mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) );})) {
		enum pipe pipe = crtc->pipe;

		new_dbuf_state->slices[pipe] =
			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
						new_dbuf_state->joined_mbus);

		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
			continue;

		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;
	}

	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);

	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
	    old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;

		if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
			/* TODO: Implement vblank synchronized MBUS joining changes */
			ret = intel_modeset_all_pipes(state);
			if (ret)
				return ret;
		}

		drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->drm)->dev : ((void *)0), DRM_UT_KMS, "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", old_dbuf_state->enabled_slices, new_dbuf_state->enabled_slices, (&(i915)->__info)->display.dbuf.slice_mask, str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus))
			    "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->drm)->dev : ((void *)0), DRM_UT_KMS, "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", old_dbuf_state->enabled_slices, new_dbuf_state->enabled_slices, (&(i915)->__info)->display.dbuf.slice_mask, str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus))
			    old_dbuf_state->enabled_slices,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->drm)->dev : ((void *)0), DRM_UT_KMS, "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", old_dbuf_state->enabled_slices, new_dbuf_state->enabled_slices, (&(i915)->__info)->display.dbuf.slice_mask, str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus))
			    new_dbuf_state->enabled_slices,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->drm)->dev : ((void *)0), DRM_UT_KMS, "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", old_dbuf_state->enabled_slices, new_dbuf_state->enabled_slices, (&(i915)->__info)->display.dbuf.slice_mask, str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus))
			    INTEL_INFO(i915)->display.dbuf.slice_mask,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->drm)->dev : ((void *)0), DRM_UT_KMS, "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", old_dbuf_state->enabled_slices, new_dbuf_state->enabled_slices, (&(i915)->__info)->display.dbuf.slice_mask, str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus))
			    str_yes_no(old_dbuf_state->joined_mbus),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->drm)->dev : ((void *)0), DRM_UT_KMS, "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", old_dbuf_state->enabled_slices, new_dbuf_state->enabled_slices, (&(i915)->__info)->display.dbuf.slice_mask, str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus))
			    str_yes_no(new_dbuf_state->joined_mbus))__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915->drm)->dev : ((void *)0), DRM_UT_KMS, "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", old_dbuf_state->enabled_slices, new_dbuf_state->enabled_slices, (&(i915)->__info)->display.dbuf.slice_mask, str_yes_no(old_dbuf_state->joined_mbus), str_yes_no(new_dbuf_state->joined_mbus));
	}

	/* per-pipe relative DDB weight; lock global state on change */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc, base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else {
		enum pipe pipe = crtc->pipe;

		new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);

		if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
			continue;

		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
		if (ret)
			return ret;
	}

	for_each_intel_crtc(&i915->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) *__mptr = ((&(&i915->drm)->mode_config.crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&i915->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) *__mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) );})) {
		ret = skl_crtc_allocate_ddb(state, crtc);
		if (ret)
			return ret;
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc, base) );}), (old_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].old_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else
				    new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc *)0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc, base) );}), (old_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].old_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else {
		ret = skl_crtc_allocate_plane_ddb(state, crtc);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(old_crtc_state,
						  new_crtc_state);
		if (ret)
			return ret;
	}

	return 0;
}
2549 | |
2550 | static char enast(bool_Bool enable) |
2551 | { |
2552 | return enable ? '*' : ' '; |
2553 | } |
2554 | |
2555 | static void |
2556 | skl_print_wm_changes(struct intel_atomic_state *state) |
2557 | { |
2558 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
2559 | const struct intel_crtc_state *old_crtc_state; |
2560 | const struct intel_crtc_state *new_crtc_state; |
2561 | struct intel_plane *plane; |
2562 | struct intel_crtc *crtc; |
2563 | int i; |
2564 | |
2565 | if (!drm_debug_enabled(DRM_UT_KMS)drm_debug_enabled_raw(DRM_UT_KMS)) |
2566 | return; |
2567 | |
2568 | for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (old_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].old_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state , uapi) );}), 1); (i)++) if (!(crtc)) {} else |
2569 | new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (old_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].old_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc_state , uapi) );}), 1); (i)++) if (!(crtc)) {} else { |
2570 | const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; |
2571 | |
2572 | old_pipe_wm = &old_crtc_state->wm.skl.optimal; |
2573 | new_pipe_wm = &new_crtc_state->wm.skl.optimal; |
2574 | |
2575 | for_each_intel_plane_on_crtc(&i915->drm, crtc, plane)for (plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = ((&(&i915->drm)->mode_config. plane_list)->next); (__typeof(*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base.head) );}); &plane ->base.head != (&(&i915->drm)->mode_config.plane_list ); plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = (plane->base.head.next); (__typeof(*plane ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base .head) );})) if (!((plane)->pipe == (crtc)->pipe)) {} else { |
2576 | enum plane_id plane_id = plane->id; |
2577 | const struct skl_ddb_entry *old, *new; |
2578 | |
2579 | old = &old_crtc_state->wm.skl.plane_ddb[plane_id]; |
2580 | new = &new_crtc_state->wm.skl.plane_ddb[plane_id]; |
2581 | |
2582 | if (skl_ddb_entry_equal(old, new)) |
2583 | continue; |
2584 | |
2585 | drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n" , plane->base.base.id, plane->base.name, old->start, old->end, new->start, new->end, skl_ddb_entry_size( old), skl_ddb_entry_size(new)) |
2586 | "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n" , plane->base.base.id, plane->base.name, old->start, old->end, new->start, new->end, skl_ddb_entry_size( old), skl_ddb_entry_size(new)) |
2587 | plane->base.base.id, plane->base.name,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n" , plane->base.base.id, plane->base.name, old->start, old->end, new->start, new->end, skl_ddb_entry_size( old), skl_ddb_entry_size(new)) |
2588 | old->start, old->end, new->start, new->end,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n" , plane->base.base.id, plane->base.name, old->start, old->end, new->start, new->end, skl_ddb_entry_size( old), skl_ddb_entry_size(new)) |
2589 | skl_ddb_entry_size(old), skl_ddb_entry_size(new))__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n" , plane->base.base.id, plane->base.name, old->start, old->end, new->start, new->end, skl_ddb_entry_size( old), skl_ddb_entry_size(new)); |
2590 | } |
2591 | |
2592 | for_each_intel_plane_on_crtc(&i915->drm, crtc, plane)for (plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = ((&(&i915->drm)->mode_config. plane_list)->next); (__typeof(*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base.head) );}); &plane ->base.head != (&(&i915->drm)->mode_config.plane_list ); plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = (plane->base.head.next); (__typeof(*plane ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base .head) );})) if (!((plane)->pipe == (crtc)->pipe)) {} else { |
2593 | enum plane_id plane_id = plane->id; |
2594 | const struct skl_plane_wm *old_wm, *new_wm; |
2595 | |
2596 | old_wm = &old_pipe_wm->planes[plane_id]; |
2597 | new_wm = &new_pipe_wm->planes[plane_id]; |
2598 | |
2599 | if (skl_plane_wm_equals(i915, old_wm, new_wm)) |
2600 | continue; |
2601 | |
2602 | drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2603 | "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2604 | " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2605 | plane->base.base.id, plane->base.name,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2606 | enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2607 | enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2608 | enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2609 | enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2610 | enast(old_wm->trans_wm.enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2611 | enast(old_wm->sagv.wm0.enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2612 | enast(old_wm->sagv.trans_wm.enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2613 | enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2614 | enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2615 | enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2616 | enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2617 | enast(new_wm->trans_wm.enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2618 | enast(new_wm->sagv.wm0.enable),__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)) |
2619 | enast(new_wm->sagv.trans_wm.enable))__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].enable), enast(old_wm->wm[1].enable), enast(old_wm-> wm[2].enable), enast(old_wm->wm[3].enable), enast(old_wm-> wm[4].enable), enast(old_wm->wm[5].enable), enast(old_wm-> wm[6].enable), enast(old_wm->wm[7].enable), enast(old_wm-> trans_wm.enable), enast(old_wm->sagv.wm0.enable), enast(old_wm ->sagv.trans_wm.enable), enast(new_wm->wm[0].enable), enast (new_wm->wm[1].enable), enast(new_wm->wm[2].enable), enast (new_wm->wm[3].enable), enast(new_wm->wm[4].enable), enast (new_wm->wm[5].enable), enast(new_wm->wm[6].enable), enast (new_wm->wm[7].enable), enast(new_wm->trans_wm.enable), enast(new_wm->sagv.wm0.enable), enast(new_wm->sagv.trans_wm .enable)); |
2620 | |
2621 | drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2622 | "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2623 | " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2624 | plane->base.base.id, plane->base.name,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2625 | enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2626 | enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2627 | enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2628 | enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2629 | enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2630 | enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2631 | enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2632 | enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2633 | enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2634 | enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2635 | enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2636 | enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2637 | enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2638 | enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2639 | enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2640 | enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2641 | enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2642 | enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2643 | enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2644 | enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2645 | enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines) |
2646 | enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n" , plane->base.base.id, plane->base.name, enast(old_wm-> wm[0].ignore_lines), old_wm->wm[0].lines, enast(old_wm-> wm[1].ignore_lines), old_wm->wm[1].lines, enast(old_wm-> wm[2].ignore_lines), old_wm->wm[2].lines, enast(old_wm-> wm[3].ignore_lines), old_wm->wm[3].lines, enast(old_wm-> wm[4].ignore_lines), old_wm->wm[4].lines, enast(old_wm-> wm[5].ignore_lines), old_wm->wm[5].lines, enast(old_wm-> wm[6].ignore_lines), old_wm->wm[6].lines, enast(old_wm-> wm[7].ignore_lines), old_wm->wm[7].lines, enast(old_wm-> trans_wm.ignore_lines), old_wm->trans_wm.lines, enast(old_wm ->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, enast (old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm .lines, enast(new_wm->wm[0].ignore_lines), new_wm->wm[0 ].lines, enast(new_wm->wm[1].ignore_lines), new_wm->wm[ 1].lines, enast(new_wm->wm[2].ignore_lines), new_wm->wm [2].lines, enast(new_wm->wm[3].ignore_lines), new_wm->wm [3].lines, enast(new_wm->wm[4].ignore_lines), new_wm->wm [4].lines, enast(new_wm->wm[5].ignore_lines), new_wm->wm [5].lines, enast(new_wm->wm[6].ignore_lines), new_wm->wm [6].lines, enast(new_wm->wm[7].ignore_lines), new_wm->wm [7].lines, enast(new_wm->trans_wm.ignore_lines), new_wm-> trans_wm.lines, enast(new_wm->sagv.wm0.ignore_lines), new_wm ->sagv.wm0.lines, enast(new_wm->sagv.trans_wm.ignore_lines ), new_wm->sagv.trans_wm.lines); |
2647 | |
2648 | drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2649 | "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2650 | " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2651 | plane->base.base.id, plane->base.name,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2652 | old_wm->wm[0].blocks, old_wm->wm[1].blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2653 | old_wm->wm[2].blocks, old_wm->wm[3].blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2654 | old_wm->wm[4].blocks, old_wm->wm[5].blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2655 | old_wm->wm[6].blocks, old_wm->wm[7].blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2656 | old_wm->trans_wm.blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2657 | old_wm->sagv.wm0.blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2658 | old_wm->sagv.trans_wm.blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2659 | new_wm->wm[0].blocks, new_wm->wm[1].blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2660 | new_wm->wm[2].blocks, new_wm->wm[3].blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2661 | new_wm->wm[4].blocks, new_wm->wm[5].blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2662 | new_wm->wm[6].blocks, new_wm->wm[7].blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2663 | new_wm->trans_wm.blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2664 | new_wm->sagv.wm0.blocks,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks) |
2665 | new_wm->sagv.trans_wm.blocks)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].blocks , old_wm->wm[1].blocks, old_wm->wm[2].blocks, old_wm-> wm[3].blocks, old_wm->wm[4].blocks, old_wm->wm[5].blocks , old_wm->wm[6].blocks, old_wm->wm[7].blocks, old_wm-> trans_wm.blocks, old_wm->sagv.wm0.blocks, old_wm->sagv. trans_wm.blocks, new_wm->wm[0].blocks, new_wm->wm[1].blocks , new_wm->wm[2].blocks, new_wm->wm[3].blocks, new_wm-> wm[4].blocks, new_wm->wm[5].blocks, new_wm->wm[6].blocks , new_wm->wm[7].blocks, new_wm->trans_wm.blocks, new_wm ->sagv.wm0.blocks, new_wm->sagv.trans_wm.blocks); |
2666 | |
2667 | drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2668 | "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2669 | " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2670 | plane->base.base.id, plane->base.name,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2671 | old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2672 | old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2673 | old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2674 | old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2675 | old_wm->trans_wm.min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2676 | old_wm->sagv.wm0.min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2677 | old_wm->sagv.trans_wm.min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2678 | new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2679 | new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2680 | new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2681 | new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2682 | new_wm->trans_wm.min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2683 | new_wm->sagv.wm0.min_ddb_alloc,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ) |
2684 | new_wm->sagv.trans_wm.min_ddb_alloc)__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", plane ->base.base.id, plane->base.name, old_wm->wm[0].min_ddb_alloc , old_wm->wm[1].min_ddb_alloc, old_wm->wm[2].min_ddb_alloc , old_wm->wm[3].min_ddb_alloc, old_wm->wm[4].min_ddb_alloc , old_wm->wm[5].min_ddb_alloc, old_wm->wm[6].min_ddb_alloc , old_wm->wm[7].min_ddb_alloc, old_wm->trans_wm.min_ddb_alloc , old_wm->sagv.wm0.min_ddb_alloc, old_wm->sagv.trans_wm .min_ddb_alloc, new_wm->wm[0].min_ddb_alloc, new_wm->wm [1].min_ddb_alloc, new_wm->wm[2].min_ddb_alloc, new_wm-> wm[3].min_ddb_alloc, new_wm->wm[4].min_ddb_alloc, new_wm-> wm[5].min_ddb_alloc, new_wm->wm[6].min_ddb_alloc, new_wm-> wm[7].min_ddb_alloc, new_wm->trans_wm.min_ddb_alloc, new_wm ->sagv.wm0.min_ddb_alloc, new_wm->sagv.trans_wm.min_ddb_alloc ); |
2685 | } |
2686 | } |
2687 | } |
2688 | |
2689 | static bool_Bool skl_plane_selected_wm_equals(struct intel_plane *plane, |
2690 | const struct skl_pipe_wm *old_pipe_wm, |
2691 | const struct skl_pipe_wm *new_pipe_wm) |
2692 | { |
2693 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(plane->base.dev); |
2694 | int level, max_level = ilk_wm_max_level(i915); |
2695 | |
2696 | for (level = 0; level <= max_level; level++) { |
2697 | /* |
2698 | * We don't check uv_wm as the hardware doesn't actually |
2699 | * use it. It only gets used for calculating the required |
2700 | * ddb allocation. |
2701 | */ |
2702 | if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level), |
2703 | skl_plane_wm_level(new_pipe_wm, plane->id, level))) |
2704 | return false0; |
2705 | } |
2706 | |
2707 | if (HAS_HW_SAGV_WM(i915)(((&(i915)->__runtime)->display.ip.ver) >= 13 && !((&(i915)->__info)->is_dgfx))) { |
2708 | const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id]; |
2709 | const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id]; |
2710 | |
2711 | if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) || |
2712 | !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm)) |
2713 | return false0; |
2714 | } |
2715 | |
2716 | return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id), |
2717 | skl_plane_trans_wm(new_pipe_wm, plane->id)); |
2718 | } |
2719 | |
2720 | /* |
2721 | * To make sure the cursor watermark registers are always consistent |
2722 | * with our computed state the following scenario needs special |
2723 | * treatment: |
2724 | * |
2725 | * 1. enable cursor |
2726 | * 2. move cursor entirely offscreen |
2727 | * 3. disable cursor |
2728 | * |
2729 | * Step 2. does call .disable_plane() but does not zero the watermarks |
2730 | * (since we consider an offscreen cursor still active for the purposes |
2731 | * of watermarks). Step 3. would not normally call .disable_plane() |
2732 | * because the actual plane visibility isn't changing, and we don't |
2733 | * deallocate the cursor ddb until the pipe gets disabled. So we must |
2734 | * force step 3. to call .disable_plane() to update the watermark |
2735 | * registers properly. |
2736 | * |
2737 | * Other planes do not suffer from this issues as their watermarks are |
2738 | * calculated based on the actual plane visibility. The only time this |
2739 | * can trigger for the other planes is during the initial readout as the |
2740 | * default value of the watermarks registers is not zero. |
2741 | */ |
2742 | static int skl_wm_add_affected_planes(struct intel_atomic_state *state, |
2743 | struct intel_crtc *crtc) |
2744 | { |
2745 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev); |
2746 | const struct intel_crtc_state *old_crtc_state = |
2747 | intel_atomic_get_old_crtc_state(state, crtc); |
2748 | struct intel_crtc_state *new_crtc_state = |
2749 | intel_atomic_get_new_crtc_state(state, crtc); |
2750 | struct intel_plane *plane; |
2751 | |
2752 | for_each_intel_plane_on_crtc(&i915->drm, crtc, plane)for (plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = ((&(&i915->drm)->mode_config. plane_list)->next); (__typeof(*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base.head) );}); &plane ->base.head != (&(&i915->drm)->mode_config.plane_list ); plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = (plane->base.head.next); (__typeof(*plane ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base .head) );})) if (!((plane)->pipe == (crtc)->pipe)) {} else { |
2753 | struct intel_plane_state *plane_state; |
2754 | enum plane_id plane_id = plane->id; |
2755 | |
2756 | /* |
2757 | * Force a full wm update for every plane on modeset. |
2758 | * Required because the reset value of the wm registers |
2759 | * is non-zero, whereas we want all disabled planes to |
2760 | * have zero watermarks. So if we turn off the relevant |
2761 | * power well the hardware state will go out of sync |
2762 | * with the software state. |
2763 | */ |
2764 | if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) && |
2765 | skl_plane_selected_wm_equals(plane, |
2766 | &old_crtc_state->wm.skl.optimal, |
2767 | &new_crtc_state->wm.skl.optimal)) |
2768 | continue; |
2769 | |
2770 | plane_state = intel_atomic_get_plane_state(state, plane); |
2771 | if (IS_ERR(plane_state)) |
2772 | return PTR_ERR(plane_state); |
2773 | |
2774 | new_crtc_state->update_planes |= BIT(plane_id)(1UL << (plane_id)); |
2775 | } |
2776 | |
2777 | return 0; |
2778 | } |
2779 | |
2780 | static int |
2781 | skl_compute_wm(struct intel_atomic_state *state) |
2782 | { |
2783 | struct intel_crtc *crtc; |
2784 | struct intel_crtc_state *new_crtc_state; |
2785 | int ret, i; |
2786 | |
2787 | for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else { |
2788 | ret = skl_build_pipe_wm(state, crtc); |
2789 | if (ret) |
2790 | return ret; |
2791 | } |
2792 | |
2793 | ret = skl_compute_ddb(state); |
2794 | if (ret) |
2795 | return ret; |
2796 | |
2797 | ret = intel_compute_sagv_mask(state); |
2798 | if (ret) |
2799 | return ret; |
2800 | |
2801 | /* |
2802 | * skl_compute_ddb() will have adjusted the final watermarks |
2803 | * based on how much ddb is available. Now we can actually |
2804 | * check if the final watermarks changed. |
2805 | */ |
2806 | for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else { |
2807 | ret = skl_wm_add_affected_planes(state, crtc); |
2808 | if (ret) |
2809 | return ret; |
2810 | } |
2811 | |
2812 | skl_print_wm_changes(state); |
2813 | |
2814 | return 0; |
2815 | } |
2816 | |
2817 | static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) |
2818 | { |
2819 | level->enable = val & PLANE_WM_EN(1 << 31); |
2820 | level->ignore_lines = val & PLANE_WM_IGNORE_LINES(1 << 30); |
2821 | level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (11) - 1)) & ((~0UL) << (0))) + 0))))(((val) & (((u32)((((~0UL) >> (64 - (11) - 1)) & ((~0UL) << (0))) + 0)) )) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - ( 11) - 1)) & ((~0UL) << (0))) + 0))) - 1)))); |
2822 | level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (26) - 1)) & ((~0UL) << (14))) + 0))))(((val) & (((u32)((((~0UL ) >> (64 - (26) - 1)) & ((~0UL) << (14))) + 0 )))) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (26) - 1)) & ((~0UL) << (14))) + 0))) - 1)))); |
2823 | } |
2824 | |
2825 | static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, |
2826 | struct skl_pipe_wm *out) |
2827 | { |
2828 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev); |
2829 | enum pipe pipe = crtc->pipe; |
2830 | int level, max_level; |
2831 | enum plane_id plane_id; |
2832 | u32 val; |
2833 | |
2834 | max_level = ilk_wm_max_level(i915); |
2835 | |
2836 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { |
2837 | struct skl_plane_wm *wm = &out->planes[plane_id]; |
2838 | |
2839 | for (level = 0; level <= max_level; level++) { |
2840 | if (plane_id != PLANE_CURSOR) |
2841 | val = intel_uncore_read(&i915->uncore, PLANE_WM(pipe, plane_id, level)((const i915_reg_t){ .reg = (((((0x70240) + (pipe) * ((0x71240 ) - (0x70240)))) + (plane_id) * ((((0x70340) + (pipe) * ((0x71340 ) - (0x70340)))) - (((0x70240) + (pipe) * ((0x71240) - (0x70240 )))))) + ((4) * (level))) })); |
2842 | else |
2843 | val = intel_uncore_read(&i915->uncore, CUR_WM(pipe, level)((const i915_reg_t){ .reg = (((0x70140) + (pipe) * ((0x71140) - (0x70140))) + ((4) * (level))) })); |
2844 | |
2845 | skl_wm_level_from_reg_val(val, &wm->wm[level]); |
2846 | } |
2847 | |
2848 | if (plane_id != PLANE_CURSOR) |
2849 | val = intel_uncore_read(&i915->uncore, PLANE_WM_TRANS(pipe, plane_id)((const i915_reg_t){ .reg = (((((0x70268) + (pipe) * ((0x71268 ) - (0x70268)))) + (plane_id) * ((((0x70368) + (pipe) * ((0x71368 ) - (0x70368)))) - (((0x70268) + (pipe) * ((0x71268) - (0x70268 ))))))) })); |
2850 | else |
2851 | val = intel_uncore_read(&i915->uncore, CUR_WM_TRANS(pipe)((const i915_reg_t){ .reg = (((0x70168) + (pipe) * ((0x71168) - (0x70168)))) })); |
2852 | |
2853 | skl_wm_level_from_reg_val(val, &wm->trans_wm); |
2854 | |
2855 | if (HAS_HW_SAGV_WM(i915)(((&(i915)->__runtime)->display.ip.ver) >= 13 && !((&(i915)->__info)->is_dgfx))) { |
2856 | if (plane_id != PLANE_CURSOR) |
2857 | val = intel_uncore_read(&i915->uncore, |
2858 | PLANE_WM_SAGV(pipe, plane_id)((const i915_reg_t){ .reg = (((((0x70258) + (pipe) * ((0x71258 ) - (0x70258)))) + (plane_id) * ((((0x70358) + (pipe) * ((0x71358 ) - (0x70358)))) - (((0x70258) + (pipe) * ((0x71258) - (0x70258 ))))))) })); |
2859 | else |
2860 | val = intel_uncore_read(&i915->uncore, |
2861 | CUR_WM_SAGV(pipe)((const i915_reg_t){ .reg = (((0x70158) + (pipe) * ((0x71158) - (0x70158)))) })); |
2862 | |
2863 | skl_wm_level_from_reg_val(val, &wm->sagv.wm0); |
2864 | |
2865 | if (plane_id != PLANE_CURSOR) |
2866 | val = intel_uncore_read(&i915->uncore, |
2867 | PLANE_WM_SAGV_TRANS(pipe, plane_id)((const i915_reg_t){ .reg = (((((0x7025C) + (pipe) * ((0x7125C ) - (0x7025C)))) + (plane_id) * ((((0x7035C) + (pipe) * ((0x7135C ) - (0x7035C)))) - (((0x7025C) + (pipe) * ((0x7125C) - (0x7025C ))))))) })); |
2868 | else |
2869 | val = intel_uncore_read(&i915->uncore, |
2870 | CUR_WM_SAGV_TRANS(pipe)((const i915_reg_t){ .reg = (((0x7015C) + (pipe) * ((0x7115C) - (0x7015C)))) })); |
2871 | |
2872 | skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm); |
2873 | } else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 12) { |
2874 | wm->sagv.wm0 = wm->wm[0]; |
2875 | wm->sagv.trans_wm = wm->trans_wm; |
2876 | } |
2877 | } |
2878 | } |
2879 | |
2880 | void skl_wm_get_hw_state(struct drm_i915_privateinteldrm_softc *i915) |
2881 | { |
2882 | struct intel_dbuf_state *dbuf_state = |
2883 | to_intel_dbuf_state(i915->display.dbuf.obj.state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((i915->display.dbuf.obj.state)); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); |
2884 | struct intel_crtc *crtc; |
2885 | |
2886 | if (HAS_MBUS_JOINING(i915)(IS_PLATFORM(i915, INTEL_ALDERLAKE_P) || ((&(i915)->__runtime )->display.ip.ver) >= 14)) |
2887 | dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL((const i915_reg_t){ .reg = (0x4438C) })) & MBUS_JOIN((u32)((1UL << (31)) + 0)); |
2888 | |
2889 | for_each_intel_crtc(&i915->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&i915->drm)->mode_config. crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&i915->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) *__mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) );})) { |
2890 | struct intel_crtc_state *crtc_state = |
2891 | to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); |
2892 | enum pipe pipe = crtc->pipe; |
2893 | unsigned int mbus_offset; |
2894 | enum plane_id plane_id; |
2895 | u8 slices; |
2896 | |
2897 | memset(&crtc_state->wm.skl.optimal, 0,__builtin_memset((&crtc_state->wm.skl.optimal), (0), ( sizeof(crtc_state->wm.skl.optimal))) |
2898 | sizeof(crtc_state->wm.skl.optimal))__builtin_memset((&crtc_state->wm.skl.optimal), (0), ( sizeof(crtc_state->wm.skl.optimal))); |
2899 | if (crtc_state->hw.active) |
2900 | skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); |
2901 | crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal; |
2902 | |
2903 | memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]))__builtin_memset((&dbuf_state->ddb[pipe]), (0), (sizeof (dbuf_state->ddb[pipe]))); |
2904 | |
2905 | for_each_plane_id_on_crtc(crtc, plane_id)for ((plane_id) = PLANE_PRIMARY; (plane_id) < I915_MAX_PLANES ; (plane_id)++) if (!((crtc)->plane_ids_mask & (1UL << (plane_id)))) {} else { |
2906 | struct skl_ddb_entry *ddb = |
2907 | &crtc_state->wm.skl.plane_ddb[plane_id]; |
2908 | struct skl_ddb_entry *ddb_y = |
2909 | &crtc_state->wm.skl.plane_ddb_y[plane_id]; |
2910 | |
2911 | if (!crtc_state->hw.active) |
2912 | continue; |
2913 | |
2914 | skl_ddb_get_hw_plane_state(i915, crtc->pipe, |
2915 | plane_id, ddb, ddb_y); |
2916 | |
2917 | skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb); |
2918 | skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y); |
2919 | } |
2920 | |
2921 | dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state); |
2922 | |
2923 | /* |
2924 | * Used for checking overlaps, so we need absolute |
2925 | * offsets instead of MBUS relative offsets. |
2926 | */ |
2927 | slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, |
2928 | dbuf_state->joined_mbus); |
2929 | mbus_offset = mbus_ddb_offset(i915, slices); |
2930 | crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; |
2931 | crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; |
2932 | |
2933 | /* The slices actually used by the planes on the pipe */ |
2934 | dbuf_state->slices[pipe] = |
2935 | skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb); |
2936 | |
2937 | drm_dbg_kms(&i915->drm,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n" , crtc->base.base.id, crtc->base.name, dbuf_state->slices [pipe], dbuf_state->ddb[pipe].start, dbuf_state->ddb[pipe ].end, dbuf_state->active_pipes, str_yes_no(dbuf_state-> joined_mbus)) |
2938 | "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n" , crtc->base.base.id, crtc->base.name, dbuf_state->slices [pipe], dbuf_state->ddb[pipe].start, dbuf_state->ddb[pipe ].end, dbuf_state->active_pipes, str_yes_no(dbuf_state-> joined_mbus)) |
2939 | crtc->base.base.id, crtc->base.name,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n" , crtc->base.base.id, crtc->base.name, dbuf_state->slices [pipe], dbuf_state->ddb[pipe].start, dbuf_state->ddb[pipe ].end, dbuf_state->active_pipes, str_yes_no(dbuf_state-> joined_mbus)) |
2940 | dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n" , crtc->base.base.id, crtc->base.name, dbuf_state->slices [pipe], dbuf_state->ddb[pipe].start, dbuf_state->ddb[pipe ].end, dbuf_state->active_pipes, str_yes_no(dbuf_state-> joined_mbus)) |
2941 | dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n" , crtc->base.base.id, crtc->base.name, dbuf_state->slices [pipe], dbuf_state->ddb[pipe].start, dbuf_state->ddb[pipe ].end, dbuf_state->active_pipes, str_yes_no(dbuf_state-> joined_mbus)) |
2942 | str_yes_no(dbuf_state->joined_mbus))__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n" , crtc->base.base.id, crtc->base.name, dbuf_state->slices [pipe], dbuf_state->ddb[pipe].start, dbuf_state->ddb[pipe ].end, dbuf_state->active_pipes, str_yes_no(dbuf_state-> joined_mbus)); |
2943 | } |
2944 | |
2945 | dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices; |
2946 | } |
2947 | |
2948 | static bool_Bool skl_dbuf_is_misconfigured(struct drm_i915_privateinteldrm_softc *i915) |
2949 | { |
2950 | const struct intel_dbuf_state *dbuf_state = |
2951 | to_intel_dbuf_state(i915->display.dbuf.obj.state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((i915->display.dbuf.obj.state)); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); |
2952 | struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; |
2953 | struct intel_crtc *crtc; |
2954 | |
2955 | for_each_intel_crtc(&i915->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&i915->drm)->mode_config. crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&i915->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) *__mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) );})) { |
2956 | const struct intel_crtc_state *crtc_state = |
2957 | to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); |
2958 | |
2959 | entries[crtc->pipe] = crtc_state->wm.skl.ddb; |
2960 | } |
2961 | |
2962 | for_each_intel_crtc(&i915->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&i915->drm)->mode_config. crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&i915->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) *__mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) );})) { |
2963 | const struct intel_crtc_state *crtc_state = |
2964 | to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); |
2965 | u8 slices; |
2966 | |
2967 | slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, |
2968 | dbuf_state->joined_mbus); |
2969 | if (dbuf_state->slices[crtc->pipe] & ~slices) |
2970 | return true1; |
2971 | |
2972 | if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, |
2973 | I915_MAX_PIPES, crtc->pipe)) |
2974 | return true1; |
2975 | } |
2976 | |
2977 | return false0; |
2978 | } |
2979 | |
2980 | void skl_wm_sanitize(struct drm_i915_privateinteldrm_softc *i915) |
2981 | { |
2982 | struct intel_crtc *crtc; |
2983 | |
2984 | /* |
2985 | * On TGL/RKL (at least) the BIOS likes to assign the planes |
2986 | * to the wrong DBUF slices. This will cause an infinite loop |
2987 | * in skl_commit_modeset_enables() as it can't find a way to |
2988 | * transition between the old bogus DBUF layout to the new |
2989 | * proper DBUF layout without DBUF allocation overlaps between |
2990 | * the planes (which cannot be allowed or else the hardware |
2991 | * may hang). If we detect a bogus DBUF layout just turn off |
2992 | * all the planes so that skl_commit_modeset_enables() can |
2993 | * simply ignore them. |
2994 | */ |
2995 | if (!skl_dbuf_is_misconfigured(i915)) |
2996 | return; |
2997 | |
2998 | drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_KMS, "BIOS has misprogrammed the DBUF, disabling all planes\n" ); |
2999 | |
3000 | for_each_intel_crtc(&i915->drm, crtc)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base .head ) *__mptr = ((&(&i915->drm)->mode_config. crtc_list)->next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof (__typeof(*crtc), base.head) );}); &crtc->base.head != (&(&i915->drm)->mode_config.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->base.head ) *__mptr = (crtc->base.head.next); (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), base.head) );})) { |
3001 | struct intel_plane *plane = to_intel_plane(crtc->base.primary)({ const __typeof( ((struct intel_plane *)0)->base ) *__mptr = (crtc->base.primary); (struct intel_plane *)( (char *)__mptr - __builtin_offsetof(struct intel_plane, base) );}); |
3002 | const struct intel_plane_state *plane_state = |
3003 | to_intel_plane_state(plane->base.state)({ const __typeof( ((struct intel_plane_state *)0)->uapi ) *__mptr = (plane->base.state); (struct intel_plane_state * )( (char *)__mptr - __builtin_offsetof(struct intel_plane_state , uapi) );}); |
3004 | struct intel_crtc_state *crtc_state = |
3005 | to_intel_crtc_state(crtc->base.state)({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) * __mptr = (crtc->base.state); (struct intel_crtc_state *)( ( char *)__mptr - __builtin_offsetof(struct intel_crtc_state, uapi ) );}); |
3006 | |
3007 | if (plane_state->uapi.visible) |
3008 | intel_plane_disable_noatomic(crtc, plane); |
3009 | |
3010 | drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0)({ int __ret = !!((crtc_state->active_planes != 0)); if (__ret ) printf("%s %s: " "%s", dev_driver_string(((&i915->drm ))->dev), "", "drm_WARN_ON(" "crtc_state->active_planes != 0" ")"); __builtin_expect(!!(__ret), 0); }); |
3011 | |
3012 | memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb))__builtin_memset((&crtc_state->wm.skl.ddb), (0), (sizeof (crtc_state->wm.skl.ddb))); |
3013 | } |
3014 | } |
3015 | |
3016 | void intel_wm_state_verify(struct intel_crtc *crtc, |
3017 | struct intel_crtc_state *new_crtc_state) |
3018 | { |
3019 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(crtc->base.dev); |
3020 | struct skl_hw_state { |
3021 | struct skl_ddb_entry ddb[I915_MAX_PLANES]; |
3022 | struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; |
3023 | struct skl_pipe_wm wm; |
3024 | } *hw; |
3025 | const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; |
3026 | int level, max_level = ilk_wm_max_level(i915); |
3027 | struct intel_plane *plane; |
3028 | u8 hw_enabled_slices; |
3029 | |
3030 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) < 9 || !new_crtc_state->hw.active) |
3031 | return; |
3032 | |
3033 | hw = kzalloc(sizeof(*hw), GFP_KERNEL(0x0001 | 0x0004)); |
3034 | if (!hw) |
3035 | return; |
3036 | |
3037 | skl_pipe_wm_get_hw_state(crtc, &hw->wm); |
3038 | |
3039 | skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y); |
3040 | |
3041 | hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915); |
3042 | |
3043 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 11 && |
3044 | hw_enabled_slices != i915->display.dbuf.enabled_slices) |
3045 | drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , i915-> display.dbuf.enabled_slices, hw_enabled_slices) |
3046 | "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , i915-> display.dbuf.enabled_slices, hw_enabled_slices) |
3047 | i915->display.dbuf.enabled_slices,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , i915-> display.dbuf.enabled_slices, hw_enabled_slices) |
3048 | hw_enabled_slices)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , i915-> display.dbuf.enabled_slices, hw_enabled_slices); |
3049 | |
3050 | for_each_intel_plane_on_crtc(&i915->drm, crtc, plane)for (plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = ((&(&i915->drm)->mode_config. plane_list)->next); (__typeof(*plane) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base.head) );}); &plane ->base.head != (&(&i915->drm)->mode_config.plane_list ); plane = ({ const __typeof( ((__typeof(*plane) *)0)->base .head ) *__mptr = (plane->base.head.next); (__typeof(*plane ) *)( (char *)__mptr - __builtin_offsetof(__typeof(*plane), base .head) );})) if (!((plane)->pipe == (crtc)->pipe)) {} else { |
3051 | const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; |
3052 | const struct skl_wm_level *hw_wm_level, *sw_wm_level; |
3053 | |
3054 | /* Watermarks */ |
3055 | for (level = 0; level <= max_level; level++) { |
3056 | hw_wm_level = &hw->wm.planes[plane->id].wm[level]; |
3057 | sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); |
3058 | |
3059 | if (skl_wm_level_equals(hw_wm_level, sw_wm_level)) |
3060 | continue; |
3061 | |
3062 | drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, level, sw_wm_level->enable , sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level-> enable, hw_wm_level->blocks, hw_wm_level->lines) |
3063 | "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, level, sw_wm_level->enable , sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level-> enable, hw_wm_level->blocks, hw_wm_level->lines) |
3064 | plane->base.base.id, plane->base.name, level,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, level, sw_wm_level->enable , sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level-> enable, hw_wm_level->blocks, hw_wm_level->lines) |
3065 | sw_wm_level->enable,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, level, sw_wm_level->enable , sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level-> enable, hw_wm_level->blocks, hw_wm_level->lines) |
3066 | sw_wm_level->blocks,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, level, sw_wm_level->enable , sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level-> enable, hw_wm_level->blocks, hw_wm_level->lines) |
3067 | sw_wm_level->lines,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, level, sw_wm_level->enable , sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level-> enable, hw_wm_level->blocks, hw_wm_level->lines) |
3068 | hw_wm_level->enable,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, level, sw_wm_level->enable , sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level-> enable, hw_wm_level->blocks, hw_wm_level->lines) |
3069 | hw_wm_level->blocks,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, level, sw_wm_level->enable , sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level-> enable, hw_wm_level->blocks, hw_wm_level->lines) |
3070 | hw_wm_level->lines)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, level, sw_wm_level->enable , sw_wm_level->blocks, sw_wm_level->lines, hw_wm_level-> enable, hw_wm_level->blocks, hw_wm_level->lines); |
3071 | } |
3072 | |
3073 | hw_wm_level = &hw->wm.planes[plane->id].trans_wm; |
3074 | sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id); |
3075 | |
3076 | if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { |
3077 | drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3078 | "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3079 | plane->base.base.id, plane->base.name,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3080 | sw_wm_level->enable,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3081 | sw_wm_level->blocks,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3082 | sw_wm_level->lines,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3083 | hw_wm_level->enable,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3084 | hw_wm_level->blocks,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3085 | hw_wm_level->lines)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines); |
3086 | } |
3087 | |
3088 | hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0; |
3089 | sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0; |
3090 | |
3091 | if (HAS_HW_SAGV_WM(i915)(((&(i915)->__runtime)->display.ip.ver) >= 13 && !((&(i915)->__info)->is_dgfx)) && |
3092 | !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { |
3093 | drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3094 | "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3095 | plane->base.base.id, plane->base.name,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3096 | sw_wm_level->enable,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3097 | sw_wm_level->blocks,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3098 | sw_wm_level->lines,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3099 | hw_wm_level->enable,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3100 | hw_wm_level->blocks,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3101 | hw_wm_level->lines)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines); |
3102 | } |
3103 | |
3104 | hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm; |
3105 | sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm; |
3106 | |
3107 | if (HAS_HW_SAGV_WM(i915)(((&(i915)->__runtime)->display.ip.ver) >= 13 && !((&(i915)->__info)->is_dgfx)) && |
3108 | !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { |
3109 | drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3110 | "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3111 | plane->base.base.id, plane->base.name,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3112 | sw_wm_level->enable,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3113 | sw_wm_level->blocks,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3114 | sw_wm_level->lines,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3115 | hw_wm_level->enable,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3116 | hw_wm_level->blocks,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines) |
3117 | hw_wm_level->lines)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_wm_level->enable, sw_wm_level ->blocks, sw_wm_level->lines, hw_wm_level->enable, hw_wm_level ->blocks, hw_wm_level->lines); |
3118 | } |
3119 | |
3120 | /* DDB */ |
3121 | hw_ddb_entry = &hw->ddb[PLANE_CURSOR]; |
3122 | sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR]; |
3123 | |
3124 | if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { |
3125 | drm_err(&i915->drm,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_ddb_entry->start, sw_ddb_entry ->end, hw_ddb_entry->start, hw_ddb_entry->end) |
3126 | "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_ddb_entry->start, sw_ddb_entry ->end, hw_ddb_entry->start, hw_ddb_entry->end) |
3127 | plane->base.base.id, plane->base.name,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_ddb_entry->start, sw_ddb_entry ->end, hw_ddb_entry->start, hw_ddb_entry->end) |
3128 | sw_ddb_entry->start, sw_ddb_entry->end,printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_ddb_entry->start, sw_ddb_entry ->end, hw_ddb_entry->start, hw_ddb_entry->end) |
3129 | hw_ddb_entry->start, hw_ddb_entry->end)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , plane-> base.base.id, plane->base.name, sw_ddb_entry->start, sw_ddb_entry ->end, hw_ddb_entry->start, hw_ddb_entry->end); |
3130 | } |
3131 | } |
3132 | |
3133 | kfree(hw); |
3134 | } |
3135 | |
3136 | bool_Bool skl_watermark_ipc_enabled(struct drm_i915_privateinteldrm_softc *i915) |
3137 | { |
3138 | return i915->display.wm.ipc_enabled; |
3139 | } |
3140 | |
3141 | void skl_watermark_ipc_update(struct drm_i915_privateinteldrm_softc *i915) |
3142 | { |
3143 | if (!HAS_IPC(i915)((&(i915)->__info)->display.has_ipc)) |
3144 | return; |
3145 | |
3146 | intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL2((const i915_reg_t){ .reg = (0x45004) }), DISP_IPC_ENABLE(1 << 3), |
3147 | skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE(1 << 3) : 0); |
3148 | } |
3149 | |
3150 | static bool_Bool skl_watermark_ipc_can_enable(struct drm_i915_privateinteldrm_softc *i915) |
3151 | { |
3152 | /* Display WA #0477 WaDisableIPC: skl */ |
3153 | if (IS_SKYLAKE(i915)IS_PLATFORM(i915, INTEL_SKYLAKE)) |
3154 | return false0; |
3155 | |
3156 | /* Display WA #1141: SKL:all KBL:all CFL */ |
3157 | if (IS_KABYLAKE(i915)IS_PLATFORM(i915, INTEL_KABYLAKE) || |
3158 | IS_COFFEELAKE(i915)IS_PLATFORM(i915, INTEL_COFFEELAKE) || |
3159 | IS_COMETLAKE(i915)IS_PLATFORM(i915, INTEL_COMETLAKE)) |
3160 | return i915->dram_info.symmetric_memory; |
3161 | |
3162 | return true1; |
3163 | } |
3164 | |
3165 | void skl_watermark_ipc_init(struct drm_i915_privateinteldrm_softc *i915) |
3166 | { |
3167 | if (!HAS_IPC(i915)((&(i915)->__info)->display.has_ipc)) |
3168 | return; |
3169 | |
3170 | i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915); |
3171 | |
3172 | skl_watermark_ipc_update(i915); |
3173 | } |
3174 | |
3175 | static void |
3176 | adjust_wm_latency(struct drm_i915_privateinteldrm_softc *i915, |
3177 | u16 wm[], int max_level, int read_latency) |
3178 | { |
3179 | bool_Bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed; |
3180 | int i, level; |
3181 | |
3182 | /* |
3183 | * If a level n (n > 1) has a 0us latency, all levels m (m >= n) |
3184 | * need to be disabled. We make sure to sanitize the values out |
3185 | * of the punit to satisfy this requirement. |
3186 | */ |
3187 | for (level = 1; level <= max_level; level++) { |
3188 | if (wm[level] == 0) { |
3189 | for (i = level + 1; i <= max_level; i++) |
3190 | wm[i] = 0; |
3191 | |
3192 | max_level = level - 1; |
3193 | break; |
3194 | } |
3195 | } |
3196 | |
3197 | /* |
3198 | * WaWmMemoryReadLatency |
3199 | * |
3200 | * punit doesn't take into account the read latency so we need |
3201 | * to add proper adjustement to each valid level we retrieve |
3202 | * from the punit when level 0 response data is 0us. |
3203 | */ |
3204 | if (wm[0] == 0) { |
3205 | for (level = 0; level <= max_level; level++) |
3206 | wm[level] += read_latency; |
3207 | } |
3208 | |
3209 | /* |
3210 | * WA Level-0 adjustment for 16GB DIMMs: SKL+ |
3211 | * If we could not get dimm info enable this WA to prevent from |
3212 | * any underrun. If not able to get Dimm info assume 16GB dimm |
3213 | * to avoid any underrun. |
3214 | */ |
3215 | if (wm_lv_0_adjust_needed) |
3216 | wm[0] += 1; |
3217 | } |
3218 | |
3219 | static void mtl_read_wm_latency(struct drm_i915_privateinteldrm_softc *i915, u16 wm[]) |
3220 | { |
3221 | struct intel_uncore *uncore = &i915->uncore; |
3222 | int max_level = ilk_wm_max_level(i915); |
3223 | u32 val; |
3224 | |
3225 | val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1((const i915_reg_t){ .reg = (0x45780) })); |
3226 | wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (0))) + 0))))(((val) & (((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (0))) + 0)) )) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - ( 12) - 1)) & ((~0UL) << (0))) + 0))) - 1)))); |
3227 | wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (16))) + 0))))(((val) & (((u32)((((~0UL ) >> (64 - (28) - 1)) & ((~0UL) << (16))) + 0 )))) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (16))) + 0))) - 1)))); |
3228 | |
3229 | val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3((const i915_reg_t){ .reg = (0x45784) })); |
3230 | wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (0))) + 0))))(((val) & (((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (0))) + 0)) )) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - ( 12) - 1)) & ((~0UL) << (0))) + 0))) - 1)))); |
3231 | wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (16))) + 0))))(((val) & (((u32)((((~0UL ) >> (64 - (28) - 1)) & ((~0UL) << (16))) + 0 )))) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (16))) + 0))) - 1)))); |
3232 | |
3233 | val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5((const i915_reg_t){ .reg = (0x45788) })); |
3234 | wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (0))) + 0))))(((val) & (((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (0))) + 0)) )) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - ( 12) - 1)) & ((~0UL) << (0))) + 0))) - 1)))); |
3235 | wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (16))) + 0))))(((val) & (((u32)((((~0UL ) >> (64 - (28) - 1)) & ((~0UL) << (16))) + 0 )))) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (16))) + 0))) - 1)))); |
3236 | |
3237 | adjust_wm_latency(i915, wm, max_level, 6); |
3238 | } |
3239 | |
3240 | static void skl_read_wm_latency(struct drm_i915_privateinteldrm_softc *i915, u16 wm[]) |
3241 | { |
3242 | int max_level = ilk_wm_max_level(i915); |
3243 | int read_latency = DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 12 ? 3 : 2; |
3244 | int mult = IS_DG2(i915)IS_PLATFORM(i915, INTEL_DG2) ? 2 : 1; |
3245 | u32 val; |
3246 | int ret; |
3247 | |
3248 | /* read the first set of memory latencies[0:3] */ |
3249 | val = 0; /* data0 to be programmed to 0 for first set */ |
3250 | ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY0x6, &val, NULL((void *)0)); |
3251 | if (ret) { |
3252 | drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "SKL Mailbox read error = %d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , ret); |
3253 | return; |
3254 | } |
3255 | |
3256 | wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (7) - 1)) & ((~0UL) << (0))) + 0))))(((val) & (((u32)((((~0UL) >> (64 - (7) - 1)) & ((~0UL) << (0))) + 0))) ) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (7 ) - 1)) & ((~0UL) << (0))) + 0))) - 1)))) * mult; |
3257 | wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (8))) + 0))))(((val) & (((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (8))) + 0)) )) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - ( 15) - 1)) & ((~0UL) << (8))) + 0))) - 1)))) * mult; |
3258 | wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (23) - 1)) & ((~0UL) << (16))) + 0))))(((val) & (((u32)((((~0UL ) >> (64 - (23) - 1)) & ((~0UL) << (16))) + 0 )))) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (23) - 1)) & ((~0UL) << (16))) + 0))) - 1)))) * mult; |
3259 | wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (31) - 1)) & ((~0UL) << (24))) + 0))))(((val) & (((u32)((((~0UL ) >> (64 - (31) - 1)) & ((~0UL) << (24))) + 0 )))) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (31) - 1)) & ((~0UL) << (24))) + 0))) - 1)))) * mult; |
3260 | |
3261 | /* read the second set of memory latencies[4:7] */ |
3262 | val = 1; /* data0 to be programmed to 1 for second set */ |
3263 | ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY0x6, &val, NULL((void *)0)); |
3264 | if (ret) { |
3265 | drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret)printf("drm:pid%d:%s *ERROR* " "[drm] " "*ERROR* " "SKL Mailbox read error = %d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , ret); |
3266 | return; |
3267 | } |
3268 | |
3269 | wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (7) - 1)) & ((~0UL) << (0))) + 0))))(((val) & (((u32)((((~0UL) >> (64 - (7) - 1)) & ((~0UL) << (0))) + 0))) ) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (7 ) - 1)) & ((~0UL) << (0))) + 0))) - 1)))) * mult; |
3270 | wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (8))) + 0))))(((val) & (((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (8))) + 0)) )) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - ( 15) - 1)) & ((~0UL) << (8))) + 0))) - 1)))) * mult; |
3271 | wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (23) - 1)) & ((~0UL) << (16))) + 0))))(((val) & (((u32)((((~0UL ) >> (64 - (23) - 1)) & ((~0UL) << (16))) + 0 )))) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (23) - 1)) & ((~0UL) << (16))) + 0))) - 1)))) * mult; |
3272 | wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val)((u32)((typeof(((u32)((((~0UL) >> (64 - (31) - 1)) & ((~0UL) << (24))) + 0))))(((val) & (((u32)((((~0UL ) >> (64 - (31) - 1)) & ((~0UL) << (24))) + 0 )))) >> (__builtin_ffsll(((u32)((((~0UL) >> (64 - (31) - 1)) & ((~0UL) << (24))) + 0))) - 1)))) * mult; |
3273 | |
3274 | adjust_wm_latency(i915, wm, max_level, read_latency); |
3275 | } |
3276 | |
3277 | static void skl_setup_wm_latency(struct drm_i915_privateinteldrm_softc *i915) |
3278 | { |
3279 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 14) |
3280 | mtl_read_wm_latency(i915, i915->display.wm.skl_latency); |
3281 | else |
3282 | skl_read_wm_latency(i915, i915->display.wm.skl_latency); |
3283 | |
3284 | intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency); |
3285 | } |
3286 | |
/* Watermark vtable for SKL+ platforms: only global (all-pipe) WM computation. */
static const struct intel_wm_funcs skl_wm_funcs = {
	.compute_global_watermarks = skl_compute_wm,
};
3290 | |
3291 | void skl_wm_init(struct drm_i915_privateinteldrm_softc *i915) |
3292 | { |
3293 | intel_sagv_init(i915); |
3294 | |
3295 | skl_setup_wm_latency(i915); |
3296 | |
3297 | i915->display.funcs.wm = &skl_wm_funcs; |
3298 | } |
3299 | |
3300 | static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj) |
3301 | { |
3302 | struct intel_dbuf_state *dbuf_state; |
3303 | |
3304 | dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL(0x0001 | 0x0004)); |
3305 | if (!dbuf_state) |
3306 | return NULL((void *)0); |
3307 | |
3308 | return &dbuf_state->base; |
3309 | } |
3310 | |
/* intel_dbuf_destroy_state - atomic-state hook: free a dbuf global state. */
static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
				     struct intel_global_state *state)
{
	kfree(state);
}
3316 | |
/* Global-object vtable wiring the dbuf state into the atomic framework. */
static const struct intel_global_state_funcs intel_dbuf_funcs = {
	.atomic_duplicate_state = intel_dbuf_duplicate_state,
	.atomic_destroy_state = intel_dbuf_destroy_state,
};
3321 | |
3322 | struct intel_dbuf_state * |
3323 | intel_atomic_get_dbuf_state(struct intel_atomic_state *state) |
3324 | { |
3325 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
3326 | struct intel_global_state *dbuf_state; |
3327 | |
3328 | dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj); |
3329 | if (IS_ERR(dbuf_state)) |
3330 | return ERR_CAST(dbuf_state); |
3331 | |
3332 | return to_intel_dbuf_state(dbuf_state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((dbuf_state)); (struct intel_dbuf_state *)( (char * )__mptr - __builtin_offsetof(struct intel_dbuf_state, base) ) ;}); |
3333 | } |
3334 | |
3335 | int intel_dbuf_init(struct drm_i915_privateinteldrm_softc *i915) |
3336 | { |
3337 | struct intel_dbuf_state *dbuf_state; |
3338 | |
3339 | dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL(0x0001 | 0x0004)); |
3340 | if (!dbuf_state) |
3341 | return -ENOMEM12; |
3342 | |
3343 | intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj, |
3344 | &dbuf_state->base, &intel_dbuf_funcs); |
3345 | |
3346 | return 0; |
3347 | } |
3348 | |
3349 | /* |
3350 | * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before |
3351 | * update the request state of all DBUS slices. |
3352 | */ |
3353 | static void update_mbus_pre_enable(struct intel_atomic_state *state) |
3354 | { |
3355 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
3356 | u32 mbus_ctl, dbuf_min_tracker_val; |
3357 | enum dbuf_slice slice; |
3358 | const struct intel_dbuf_state *dbuf_state = |
3359 | intel_atomic_get_new_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_new_global_obj_state(state, & to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); |
3360 | |
3361 | if (!HAS_MBUS_JOINING(i915)(IS_PLATFORM(i915, INTEL_ALDERLAKE_P) || ((&(i915)->__runtime )->display.ip.ver) >= 14)) |
3362 | return; |
3363 | |
3364 | /* |
3365 | * TODO: Implement vblank synchronized MBUS joining changes. |
3366 | * Must be properly coordinated with dbuf reprogramming. |
3367 | */ |
3368 | if (dbuf_state->joined_mbus) { |
3369 | mbus_ctl = MBUS_HASHING_MODE_1x4((u32)((((typeof(((u32)((1UL << (30)) + 0))))(1) << (__builtin_ffsll(((u32)((1UL << (30)) + 0))) - 1)) & (((u32)((1UL << (30)) + 0)))) + 0 + 0 + 0 + 0)) | MBUS_JOIN((u32)((1UL << (31)) + 0)) | |
3370 | MBUS_JOIN_PIPE_SELECT_NONE((u32)((((typeof(((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (26))) + 0))))(7) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (26))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (28 ) - 1)) & ((~0UL) << (26))) + 0)))) + 0 + 0 + 0 + 0 )); |
3371 | dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3)((u32)((((typeof(((u32)((((~0UL) >> (64 - (18) - 1)) & ((~0UL) << (16))) + 0))))(3) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (18) - 1)) & ((~0UL) << (16))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (18 ) - 1)) & ((~0UL) << (16))) + 0)))) + 0 + 0 + 0 + 0 )); |
3372 | } else { |
3373 | mbus_ctl = MBUS_HASHING_MODE_2x2((u32)((((typeof(((u32)((1UL << (30)) + 0))))(0) << (__builtin_ffsll(((u32)((1UL << (30)) + 0))) - 1)) & (((u32)((1UL << (30)) + 0)))) + 0 + 0 + 0 + 0)) | |
3374 | MBUS_JOIN_PIPE_SELECT_NONE((u32)((((typeof(((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (26))) + 0))))(7) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (26))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (28 ) - 1)) & ((~0UL) << (26))) + 0)))) + 0 + 0 + 0 + 0 )); |
3375 | dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1)((u32)((((typeof(((u32)((((~0UL) >> (64 - (18) - 1)) & ((~0UL) << (16))) + 0))))(1) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (18) - 1)) & ((~0UL) << (16))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (18 ) - 1)) & ((~0UL) << (16))) + 0)))) + 0 + 0 + 0 + 0 )); |
3376 | } |
3377 | |
3378 | intel_de_rmw(i915, MBUS_CTL((const i915_reg_t){ .reg = (0x4438C) }), |
3379 | MBUS_HASHING_MODE_MASK((u32)((1UL << (30)) + 0)) | MBUS_JOIN((u32)((1UL << (31)) + 0)) | |
3380 | MBUS_JOIN_PIPE_SELECT_MASK((u32)((((~0UL) >> (64 - (28) - 1)) & ((~0UL) << (26))) + 0)), mbus_ctl); |
3381 | |
3382 | for_each_dbuf_slice(i915, slice)for ((slice) = DBUF_S1; (slice) < I915_MAX_DBUF_SLICES; (slice )++) if (!((&(i915)->__info)->display.dbuf.slice_mask & (1UL << (slice)))) {} else |
3383 | intel_de_rmw(i915, DBUF_CTL_S(slice)((const i915_reg_t){ .reg = ((((const u32 []){ 0x45008, 0x44FE8 , 0x44300, 0x44304 })[slice])) }), |
3384 | DBUF_MIN_TRACKER_STATE_SERVICE_MASK((u32)((((~0UL) >> (64 - (18) - 1)) & ((~0UL) << (16))) + 0)), |
3385 | dbuf_min_tracker_val); |
3386 | } |
3387 | |
3388 | void intel_dbuf_pre_plane_update(struct intel_atomic_state *state) |
3389 | { |
3390 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
3391 | const struct intel_dbuf_state *new_dbuf_state = |
3392 | intel_atomic_get_new_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_new_global_obj_state(state, & to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); |
3393 | const struct intel_dbuf_state *old_dbuf_state = |
3394 | intel_atomic_get_old_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_old_global_obj_state(state, & to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); |
3395 | |
3396 | if (!new_dbuf_state || |
3397 | (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices && |
3398 | new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)) |
3399 | return; |
3400 | |
3401 | WARN_ON(!new_dbuf_state->base.changed)({ int __ret = !!(!new_dbuf_state->base.changed); if (__ret ) printf("WARNING %s failed at %s:%d\n", "!new_dbuf_state->base.changed" , "/usr/src/sys/dev/pci/drm/i915/display/skl_watermark.c", 3401 ); __builtin_expect(!!(__ret), 0); }); |
3402 | |
3403 | update_mbus_pre_enable(state); |
3404 | gen9_dbuf_slices_update(i915, |
3405 | old_dbuf_state->enabled_slices | |
3406 | new_dbuf_state->enabled_slices); |
3407 | } |
3408 | |
3409 | void intel_dbuf_post_plane_update(struct intel_atomic_state *state) |
3410 | { |
3411 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
3412 | const struct intel_dbuf_state *new_dbuf_state = |
3413 | intel_atomic_get_new_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_new_global_obj_state(state, & to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); |
3414 | const struct intel_dbuf_state *old_dbuf_state = |
3415 | intel_atomic_get_old_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_old_global_obj_state(state, & to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); |
3416 | |
3417 | if (!new_dbuf_state || |
3418 | (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices && |
3419 | new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)) |
3420 | return; |
3421 | |
3422 | WARN_ON(!new_dbuf_state->base.changed)({ int __ret = !!(!new_dbuf_state->base.changed); if (__ret ) printf("WARNING %s failed at %s:%d\n", "!new_dbuf_state->base.changed" , "/usr/src/sys/dev/pci/drm/i915/display/skl_watermark.c", 3422 ); __builtin_expect(!!(__ret), 0); }); |
3423 | |
3424 | gen9_dbuf_slices_update(i915, |
3425 | new_dbuf_state->enabled_slices); |
3426 | } |
3427 | |
3428 | static bool_Bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes) |
3429 | { |
3430 | switch (pipe) { |
3431 | case PIPE_A: |
3432 | return !(active_pipes & BIT(PIPE_D)(1UL << (PIPE_D))); |
3433 | case PIPE_D: |
3434 | return !(active_pipes & BIT(PIPE_A)(1UL << (PIPE_A))); |
3435 | case PIPE_B: |
3436 | return !(active_pipes & BIT(PIPE_C)(1UL << (PIPE_C))); |
3437 | case PIPE_C: |
3438 | return !(active_pipes & BIT(PIPE_B)(1UL << (PIPE_B))); |
3439 | default: /* to suppress compiler warning */ |
3440 | MISSING_CASE(pipe)({ int __ret = !!(1); if (__ret) printf("Missing case (%s == %ld)\n" , "pipe", (long)(pipe)); __builtin_expect(!!(__ret), 0); }); |
3441 | break; |
3442 | } |
3443 | |
3444 | return false0; |
3445 | } |
3446 | |
3447 | void intel_mbus_dbox_update(struct intel_atomic_state *state) |
3448 | { |
3449 | struct drm_i915_privateinteldrm_softc *i915 = to_i915(state->base.dev); |
3450 | const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; |
3451 | const struct intel_crtc_state *new_crtc_state; |
3452 | const struct intel_crtc *crtc; |
3453 | u32 val = 0; |
3454 | int i; |
3455 | |
3456 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) < 11) |
3457 | return; |
3458 | |
3459 | new_dbuf_state = intel_atomic_get_new_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_new_global_obj_state(state, & to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); |
3460 | old_dbuf_state = intel_atomic_get_old_dbuf_state(state)({ const __typeof( ((struct intel_dbuf_state *)0)->base ) * __mptr = ((intel_atomic_get_old_global_obj_state(state, & to_i915(state->base.dev)->display.dbuf.obj))); (struct intel_dbuf_state *)( (char *)__mptr - __builtin_offsetof(struct intel_dbuf_state , base) );}); |
3461 | if (!new_dbuf_state || |
3462 | (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus && |
3463 | new_dbuf_state->active_pipes == old_dbuf_state->active_pipes)) |
3464 | return; |
3465 | |
3466 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 14) |
3467 | val |= MBUS_DBOX_I_CREDIT(2)((u32)((((typeof(((u32)((((~0UL) >> (64 - (7) - 1)) & ((~0UL) << (5))) + 0))))(2) << (__builtin_ffsll( ((u32)((((~0UL) >> (64 - (7) - 1)) & ((~0UL) << (5))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (7) - 1)) & ((~0UL) << (5))) + 0)))) + 0 + 0 + 0 + 0)); |
3468 | |
3469 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 12) { |
3470 | val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16)((u32)((((typeof(((u32)((((~0UL) >> (64 - (24) - 1)) & ((~0UL) << (20))) + 0))))(16) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (24) - 1)) & ((~0UL) << (20))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (24 ) - 1)) & ((~0UL) << (20))) + 0)))) + 0 + 0 + 0 + 0 )); |
3471 | val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1)((u32)((((typeof(((u32)((((~0UL) >> (64 - (19) - 1)) & ((~0UL) << (17))) + 0))))(1) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (19) - 1)) & ((~0UL) << (17))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (19 ) - 1)) & ((~0UL) << (17))) + 0)))) + 0 + 0 + 0 + 0 )); |
3472 | val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN((u32)((1UL << (16)) + 0)); |
3473 | } |
3474 | |
3475 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 14) |
3476 | val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12)((u32)((((typeof(((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0))))(12) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0)))) + 0 + 0 + 0 + 0)) : |
3477 | MBUS_DBOX_A_CREDIT(8)((u32)((((typeof(((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0))))(8) << (__builtin_ffsll( ((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0)))) + 0 + 0 + 0 + 0)); |
3478 | else if (IS_ALDERLAKE_P(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_P)) |
3479 | /* Wa_22010947358:adl-p */ |
3480 | val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6)((u32)((((typeof(((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0))))(6) << (__builtin_ffsll( ((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0)))) + 0 + 0 + 0 + 0)) : |
3481 | MBUS_DBOX_A_CREDIT(4)((u32)((((typeof(((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0))))(4) << (__builtin_ffsll( ((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0)))) + 0 + 0 + 0 + 0)); |
3482 | else |
3483 | val |= MBUS_DBOX_A_CREDIT(2)((u32)((((typeof(((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0))))(2) << (__builtin_ffsll( ((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (3) - 1)) & ((~0UL) << (0))) + 0)))) + 0 + 0 + 0 + 0)); |
3484 | |
3485 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 14) { |
3486 | val |= MBUS_DBOX_B_CREDIT(0xA)((u32)((((typeof(((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (8))) + 0))))(0xA) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (8))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (12 ) - 1)) & ((~0UL) << (8))) + 0)))) + 0 + 0 + 0 + 0) ); |
3487 | } else if (IS_ALDERLAKE_P(i915)IS_PLATFORM(i915, INTEL_ALDERLAKE_P)) { |
3488 | val |= MBUS_DBOX_BW_CREDIT(2)((u32)((((typeof(((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (14))) + 0))))(2) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (14))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (15 ) - 1)) & ((~0UL) << (14))) + 0)))) + 0 + 0 + 0 + 0 )); |
3489 | val |= MBUS_DBOX_B_CREDIT(8)((u32)((((typeof(((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (8))) + 0))))(8) << (__builtin_ffsll( ((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (8))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (12 ) - 1)) & ((~0UL) << (8))) + 0)))) + 0 + 0 + 0 + 0) ); |
3490 | } else if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 12) { |
3491 | val |= MBUS_DBOX_BW_CREDIT(2)((u32)((((typeof(((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (14))) + 0))))(2) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (14))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (15 ) - 1)) & ((~0UL) << (14))) + 0)))) + 0 + 0 + 0 + 0 )); |
3492 | val |= MBUS_DBOX_B_CREDIT(12)((u32)((((typeof(((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (8))) + 0))))(12) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (8))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (12 ) - 1)) & ((~0UL) << (8))) + 0)))) + 0 + 0 + 0 + 0) ); |
3493 | } else { |
3494 | val |= MBUS_DBOX_BW_CREDIT(1)((u32)((((typeof(((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (14))) + 0))))(1) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (14))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (15 ) - 1)) & ((~0UL) << (14))) + 0)))) + 0 + 0 + 0 + 0 )); |
3495 | val |= MBUS_DBOX_B_CREDIT(8)((u32)((((typeof(((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (8))) + 0))))(8) << (__builtin_ffsll( ((u32)((((~0UL) >> (64 - (12) - 1)) & ((~0UL) << (8))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (12 ) - 1)) & ((~0UL) << (8))) + 0)))) + 0 + 0 + 0 + 0) ); |
3496 | } |
3497 | |
3498 | for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)for ((i) = 0; (i) < (state)->base.dev->mode_config.num_crtc && ((crtc) = ({ const __typeof( ((struct intel_crtc * )0)->base ) *__mptr = ((state)->base.crtcs[i].ptr); (struct intel_crtc *)( (char *)__mptr - __builtin_offsetof(struct intel_crtc , base) );}), (new_crtc_state) = ({ const __typeof( ((struct intel_crtc_state *)0)->uapi ) *__mptr = ((state)->base.crtcs[i].new_state ); (struct intel_crtc_state *)( (char *)__mptr - __builtin_offsetof (struct intel_crtc_state, uapi) );}), 1); (i)++) if (!(crtc)) {} else { |
3499 | u32 pipe_val = val; |
3500 | |
3501 | if (!new_crtc_state->hw.active) |
3502 | continue; |
3503 | |
3504 | if (DISPLAY_VER(i915)((&(i915)->__runtime)->display.ip.ver) >= 14) { |
3505 | if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, |
3506 | new_dbuf_state->active_pipes)) |
3507 | pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL((u32)((((typeof(((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (14))) + 0))))(0x3) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (14))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (15 ) - 1)) & ((~0UL) << (14))) + 0)))) + 0 + 0 + 0 + 0 )); |
3508 | else |
3509 | pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL((u32)((((typeof(((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (14))) + 0))))(0x2) << (__builtin_ffsll (((u32)((((~0UL) >> (64 - (15) - 1)) & ((~0UL) << (14))) + 0))) - 1)) & (((u32)((((~0UL) >> (64 - (15 ) - 1)) & ((~0UL) << (14))) + 0)))) + 0 + 0 + 0 + 0 )); |
3510 | } |
3511 | |
3512 | intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe)((const i915_reg_t){ .reg = (((0x7003C) + (crtc->pipe) * ( (0x7103C) - (0x7003C)))) }), pipe_val); |
3513 | } |
3514 | } |
3515 | |
3516 | #ifdef notyet |
3517 | |
3518 | static int skl_watermark_ipc_status_show(struct seq_file *m, void *data) |
3519 | { |
3520 | struct drm_i915_privateinteldrm_softc *i915 = m->private; |
3521 | |
3522 | seq_printf(m, "Isochronous Priority Control: %s\n", |
3523 | str_yes_no(skl_watermark_ipc_enabled(i915))); |
3524 | return 0; |
3525 | } |
3526 | |
3527 | static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file) |
3528 | { |
3529 | struct drm_i915_privateinteldrm_softc *i915 = inode->i_private; |
3530 | |
3531 | return single_open(file, skl_watermark_ipc_status_show, i915); |
3532 | } |
3533 | |
3534 | static ssize_t skl_watermark_ipc_status_write(struct file *file, |
3535 | const char __user *ubuf, |
3536 | size_t len, loff_t *offp) |
3537 | { |
3538 | struct seq_file *m = file->private_data; |
3539 | struct drm_i915_privateinteldrm_softc *i915 = m->private; |
3540 | intel_wakeref_t wakeref; |
3541 | bool_Bool enable; |
3542 | int ret; |
3543 | |
3544 | ret = kstrtobool_from_user(ubuf, len, &enable); |
3545 | if (ret < 0) |
3546 | return ret; |
3547 | |
3548 | with_intel_runtime_pm(&i915->runtime_pm, wakeref)for ((wakeref) = intel_runtime_pm_get(&i915->runtime_pm ); (wakeref); intel_runtime_pm_put((&i915->runtime_pm) , (wakeref)), (wakeref) = 0) { |
3549 | if (!skl_watermark_ipc_enabled(i915) && enable) |
3550 | drm_info(&i915->drm,do { } while(0) |
3551 | "Enabling IPC: WM will be proper only after next commit\n")do { } while(0); |
3552 | i915->display.wm.ipc_enabled = enable; |
3553 | skl_watermark_ipc_update(i915); |
3554 | } |
3555 | |
3556 | return len; |
3557 | } |
3558 | |
/*
 * File operations for the i915_ipc_status debugfs entry: reads go through
 * the seq_file single_open() machinery, writes parse a boolean to toggle
 * IPC (see skl_watermark_ipc_status_write()).
 */
static const struct file_operations skl_watermark_ipc_status_fops = {
	.owner = THIS_MODULE((void *)0),
	.open = skl_watermark_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = skl_watermark_ipc_status_write
};
3567 | |
3568 | #endif /* notyet */ |
3569 | |
3570 | void skl_watermark_ipc_debugfs_register(struct drm_i915_privateinteldrm_softc *i915) |
3571 | { |
3572 | struct drm_minor *minor = i915->drm.primary; |
Value stored to 'minor' during its initialization is never read | |
3573 | |
3574 | if (!HAS_IPC(i915)((&(i915)->__info)->display.has_ipc)) |
3575 | return; |
3576 | |
3577 | debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,ERR_PTR(-78) |
3578 | &skl_watermark_ipc_status_fops)ERR_PTR(-78); |
3579 | } |