File: | dev/pci/drm/amd/display/dc/core/amdgpu_dc.c |
Warning: | line 2582, column 8 Access to field 'flip_immediate' results in a dereference of a null pointer (loaded from variable 'plane_state') |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* | |||
2 | * Copyright 2015 Advanced Micro Devices, Inc. | |||
3 | * | |||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
5 | * copy of this software and associated documentation files (the "Software"), | |||
6 | * to deal in the Software without restriction, including without limitation | |||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
8 | * and/or sell copies of the Software, and to permit persons to whom the | |||
9 | * Software is furnished to do so, subject to the following conditions: | |||
10 | * | |||
11 | * The above copyright notice and this permission notice shall be included in | |||
12 | * all copies or substantial portions of the Software. | |||
13 | * | |||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |||
20 | * OTHER DEALINGS IN THE SOFTWARE. | |||
21 | * | |||
22 | * Authors: AMD | |||
23 | */ | |||
24 | ||||
25 | #include <linux/slab.h> | |||
26 | #include <linux/mm.h> | |||
27 | ||||
28 | #include "dm_services.h" | |||
29 | ||||
30 | #include "dc.h" | |||
31 | ||||
32 | #include "core_status.h" | |||
33 | #include "core_types.h" | |||
34 | #include "hw_sequencer.h" | |||
35 | #include "dce/dce_hwseq.h" | |||
36 | ||||
37 | #include "resource.h" | |||
38 | ||||
39 | #include "clk_mgr.h" | |||
40 | #include "clock_source.h" | |||
41 | #include "dc_bios_types.h" | |||
42 | ||||
43 | #include "bios_parser_interface.h" | |||
44 | #include "include/irq_service_interface.h" | |||
45 | #include "transform.h" | |||
46 | #include "dmcu.h" | |||
47 | #include "dpp.h" | |||
48 | #include "timing_generator.h" | |||
49 | #include "abm.h" | |||
50 | #include "virtual/virtual_link_encoder.h" | |||
51 | ||||
52 | #include "link_hwss.h" | |||
53 | #include "link_encoder.h" | |||
54 | ||||
55 | #include "dc_link_ddc.h" | |||
56 | #include "dm_helpers.h" | |||
57 | #include "mem_input.h" | |||
58 | #include "hubp.h" | |||
59 | ||||
60 | #include "dc_link_dp.h" | |||
61 | #include "dc_dmub_srv.h" | |||
62 | ||||
63 | #include "dsc.h" | |||
64 | ||||
65 | #include "vm_helper.h" | |||
66 | ||||
67 | #include "dce/dce_i2c.h" | |||
68 | ||||
69 | #include "dmub/dmub_srv.h" | |||
70 | ||||
71 | #include "dce/dmub_hw_lock_mgr.h" | |||
72 | ||||
/* Shorthands consumed by the register-access / logging helper macros
 * included above; both expect a local 'dc' pointer in scope. */
#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

/* Build identifier string for this DC library build. */
static const char DC_BUILD_ID[] = "production-build";
80 | ||||
81 | /** | |||
82 | * DOC: Overview | |||
83 | * | |||
84 | * DC is the OS-agnostic component of the amdgpu DC driver. | |||
85 | * | |||
86 | * DC maintains and validates a set of structs representing the state of the | |||
87 | * driver and writes that state to AMD hardware | |||
88 | * | |||
89 | * Main DC HW structs: | |||
90 | * | |||
91 | * struct dc - The central struct. One per driver. Created on driver load, | |||
92 | * destroyed on driver unload. | |||
93 | * | |||
94 | * struct dc_context - One per driver. | |||
95 | * Used as a backpointer by most other structs in dc. | |||
96 | * | |||
97 | * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP | |||
98 | * plugpoints). Created on driver load, destroyed on driver unload. | |||
99 | * | |||
100 | * struct dc_sink - One per display. Created on boot or hotplug. | |||
101 | * Destroyed on shutdown or hotunplug. A dc_link can have a local sink | |||
102 | * (the display directly attached). It may also have one or more remote | |||
103 | * sinks (in the Multi-Stream Transport case) | |||
104 | * | |||
105 | * struct resource_pool - One per driver. Represents the hw blocks not in the | |||
106 | * main pipeline. Not directly accessible by dm. | |||
107 | * | |||
108 | * Main dc state structs: | |||
109 | * | |||
110 | * These structs can be created and destroyed as needed. There is a full set of | |||
111 | * these structs in dc->current_state representing the currently programmed state. | |||
112 | * | |||
113 | * struct dc_state - The global DC state to track global state information, | |||
114 | * such as bandwidth values. | |||
115 | * | |||
116 | * struct dc_stream_state - Represents the hw configuration for the pipeline from | |||
117 | * a framebuffer to a display. Maps one-to-one with dc_sink. | |||
118 | * | |||
119 | * struct dc_plane_state - Represents a framebuffer. Each stream has at least one, | |||
120 | * and may have more in the Multi-Plane Overlay case. | |||
121 | * | |||
122 | * struct resource_context - Represents the programmable state of everything in | |||
123 | * the resource_pool. Not directly accessible by dm. | |||
124 | * | |||
125 | * struct pipe_ctx - A member of struct resource_context. Represents the | |||
126 | * internal hardware pipeline components. Each dc_plane_state has either | |||
127 | * one or two (in the pipe-split case). | |||
128 | */ | |||
129 | ||||
130 | /******************************************************************************* | |||
131 | * Private functions | |||
132 | ******************************************************************************/ | |||
133 | ||||
134 | static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) | |||
135 | { | |||
136 | if (new > *original) | |||
137 | *original = new; | |||
138 | } | |||
139 | ||||
140 | static void destroy_links(struct dc *dc) | |||
141 | { | |||
142 | uint32_t i; | |||
143 | ||||
144 | for (i = 0; i < dc->link_count; i++) { | |||
145 | if (NULL((void *)0) != dc->links[i]) | |||
146 | link_destroy(&dc->links[i]); | |||
147 | } | |||
148 | } | |||
149 | ||||
150 | static bool_Bool create_links( | |||
151 | struct dc *dc, | |||
152 | uint32_t num_virtual_links) | |||
153 | { | |||
154 | int i; | |||
155 | int connectors_num; | |||
156 | struct dc_bios *bios = dc->ctx->dc_bios; | |||
157 | ||||
158 | dc->link_count = 0; | |||
159 | ||||
160 | connectors_num = bios->funcs->get_connectors_number(bios); | |||
161 | ||||
162 | if (connectors_num > ENUM_ID_COUNT) { | |||
163 | dm_error(__drm_err("DC: Number of connectors %d exceeds maximum of %d!\n" , connectors_num, ENUM_ID_COUNT) | |||
164 | "DC: Number of connectors %d exceeds maximum of %d!\n",__drm_err("DC: Number of connectors %d exceeds maximum of %d!\n" , connectors_num, ENUM_ID_COUNT) | |||
165 | connectors_num,__drm_err("DC: Number of connectors %d exceeds maximum of %d!\n" , connectors_num, ENUM_ID_COUNT) | |||
166 | ENUM_ID_COUNT)__drm_err("DC: Number of connectors %d exceeds maximum of %d!\n" , connectors_num, ENUM_ID_COUNT); | |||
167 | return false0; | |||
168 | } | |||
169 | ||||
170 | dm_output_to_console(__drm_dbg(DRM_UT_KMS, "DC: %s: connectors_num: physical:%d, virtual:%d\n" , __func__, connectors_num, num_virtual_links) | |||
171 | "DC: %s: connectors_num: physical:%d, virtual:%d\n",__drm_dbg(DRM_UT_KMS, "DC: %s: connectors_num: physical:%d, virtual:%d\n" , __func__, connectors_num, num_virtual_links) | |||
172 | __func__,__drm_dbg(DRM_UT_KMS, "DC: %s: connectors_num: physical:%d, virtual:%d\n" , __func__, connectors_num, num_virtual_links) | |||
173 | connectors_num,__drm_dbg(DRM_UT_KMS, "DC: %s: connectors_num: physical:%d, virtual:%d\n" , __func__, connectors_num, num_virtual_links) | |||
174 | num_virtual_links)__drm_dbg(DRM_UT_KMS, "DC: %s: connectors_num: physical:%d, virtual:%d\n" , __func__, connectors_num, num_virtual_links); | |||
175 | ||||
176 | for (i = 0; i < connectors_num; i++) { | |||
177 | struct link_init_data link_init_params = {0}; | |||
178 | struct dc_link *link; | |||
179 | ||||
180 | link_init_params.ctx = dc->ctx; | |||
181 | /* next BIOS object table connector */ | |||
182 | link_init_params.connector_index = i; | |||
183 | link_init_params.link_index = dc->link_count; | |||
184 | link_init_params.dc = dc; | |||
185 | link = link_create(&link_init_params); | |||
186 | ||||
187 | if (link) { | |||
188 | bool_Bool should_destory_link = false0; | |||
189 | ||||
190 | if (link->connector_signal == SIGNAL_TYPE_EDP) { | |||
191 | if (dc->config.edp_not_connected) { | |||
192 | if (!IS_DIAG_DC(dc->ctx->dce_environment)((dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) || ( dc->ctx->dce_environment == DCE_ENV_DIAG))) | |||
193 | should_destory_link = true1; | |||
194 | } else { | |||
195 | enum dc_connection_type type; | |||
196 | dc_link_detect_sink(link, &type); | |||
197 | if (type == dc_connection_none) | |||
198 | should_destory_link = true1; | |||
199 | } | |||
200 | } | |||
201 | ||||
202 | if (dc->config.force_enum_edp || !should_destory_link) { | |||
203 | dc->links[dc->link_count] = link; | |||
204 | link->dc = dc; | |||
205 | ++dc->link_count; | |||
206 | } else { | |||
207 | link_destroy(&link); | |||
208 | } | |||
209 | } | |||
210 | } | |||
211 | ||||
212 | for (i = 0; i < num_virtual_links; i++) { | |||
213 | struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL(0x0001 | 0x0004)); | |||
214 | struct encoder_init_data enc_init = {0}; | |||
215 | ||||
216 | if (link == NULL((void *)0)) { | |||
217 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 217); do {} while (0); } while (0); | |||
218 | goto failed_alloc; | |||
219 | } | |||
220 | ||||
221 | link->link_index = dc->link_count; | |||
222 | dc->links[dc->link_count] = link; | |||
223 | dc->link_count++; | |||
224 | ||||
225 | link->ctx = dc->ctx; | |||
226 | link->dc = dc; | |||
227 | link->connector_signal = SIGNAL_TYPE_VIRTUAL; | |||
228 | link->link_id.type = OBJECT_TYPE_CONNECTOR; | |||
229 | link->link_id.id = CONNECTOR_ID_VIRTUAL; | |||
230 | link->link_id.enum_id = ENUM_ID_1; | |||
231 | link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL(0x0001 | 0x0004)); | |||
232 | ||||
233 | if (!link->link_enc) { | |||
234 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 234); do {} while (0); } while (0); | |||
235 | goto failed_alloc; | |||
236 | } | |||
237 | ||||
238 | link->link_status.dpcd_caps = &link->dpcd_caps; | |||
239 | ||||
240 | enc_init.ctx = dc->ctx; | |||
241 | enc_init.channel = CHANNEL_ID_UNKNOWN; | |||
242 | enc_init.hpd_source = HPD_SOURCEID_UNKNOWN; | |||
243 | enc_init.transmitter = TRANSMITTER_UNKNOWN; | |||
244 | enc_init.connector = link->link_id; | |||
245 | enc_init.encoder.type = OBJECT_TYPE_ENCODER; | |||
246 | enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; | |||
247 | enc_init.encoder.enum_id = ENUM_ID_1; | |||
248 | virtual_link_encoder_construct(link->link_enc, &enc_init); | |||
249 | } | |||
250 | ||||
251 | return true1; | |||
252 | ||||
253 | failed_alloc: | |||
254 | return false0; | |||
255 | } | |||
256 | ||||
257 | static struct dc_perf_trace *dc_perf_trace_create(void) | |||
258 | { | |||
259 | return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL(0x0001 | 0x0004)); | |||
260 | } | |||
261 | ||||
/* Free *perf_trace and NULL the caller's pointer to guard against reuse. */
static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}
267 | ||||
268 | /** | |||
269 | ***************************************************************************** | |||
270 | * Function: dc_stream_adjust_vmin_vmax | |||
271 | * | |||
272 | * @brief | |||
273 | * Looks up the pipe context of dc_stream_state and updates the | |||
274 | * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh | |||
275 | * Rate, which is a power-saving feature that targets reducing panel | |||
276 | * refresh rate while the screen is static | |||
277 | * | |||
278 | * @param [in] dc: dc reference | |||
279 | * @param [in] stream: Initial dc stream state | |||
280 | * @param [in] adjust: Updated parameters for vertical_total_min and | |||
281 | * vertical_total_max | |||
282 | ***************************************************************************** | |||
283 | */ | |||
284 | bool_Bool dc_stream_adjust_vmin_vmax(struct dc *dc, | |||
285 | struct dc_stream_state *stream, | |||
286 | struct dc_crtc_timing_adjust *adjust) | |||
287 | { | |||
288 | int i = 0; | |||
289 | bool_Bool ret = false0; | |||
290 | ||||
291 | stream->adjust = *adjust; | |||
292 | ||||
293 | for (i = 0; i < MAX_PIPES6; i++) { | |||
294 | struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; | |||
295 | ||||
296 | if (pipe->stream == stream && pipe->stream_res.tg) { | |||
297 | dc->hwss.set_drr(&pipe, | |||
298 | 1, | |||
299 | adjust->v_total_min, | |||
300 | adjust->v_total_max, | |||
301 | adjust->v_total_mid, | |||
302 | adjust->v_total_mid_frame_num); | |||
303 | ||||
304 | ret = true1; | |||
305 | } | |||
306 | } | |||
307 | return ret; | |||
308 | } | |||
309 | ||||
310 | bool_Bool dc_stream_get_crtc_position(struct dc *dc, | |||
311 | struct dc_stream_state **streams, int num_streams, | |||
312 | unsigned int *v_pos, unsigned int *nom_v_pos) | |||
313 | { | |||
314 | /* TODO: Support multiple streams */ | |||
315 | const struct dc_stream_state *stream = streams[0]; | |||
316 | int i = 0; | |||
317 | bool_Bool ret = false0; | |||
318 | struct crtc_position position; | |||
319 | ||||
320 | for (i = 0; i < MAX_PIPES6; i++) { | |||
321 | struct pipe_ctx *pipe = | |||
322 | &dc->current_state->res_ctx.pipe_ctx[i]; | |||
323 | ||||
324 | if (pipe->stream == stream && pipe->stream_res.stream_enc) { | |||
325 | dc->hwss.get_position(&pipe, 1, &position); | |||
326 | ||||
327 | *v_pos = position.vertical_count; | |||
328 | *nom_v_pos = position.nominal_vcount; | |||
329 | ret = true1; | |||
330 | } | |||
331 | } | |||
332 | return ret; | |||
333 | } | |||
334 | ||||
335 | /** | |||
336 | * dc_stream_configure_crc() - Configure CRC capture for the given stream. | |||
337 | * @dc: DC Object | |||
338 | * @stream: The stream to configure CRC on. | |||
339 | * @enable: Enable CRC if true, disable otherwise. | |||
340 | * @continuous: Capture CRC on every frame if true. Otherwise, only capture | |||
341 | * once. | |||
342 | * | |||
343 | * By default, only CRC0 is configured, and the entire frame is used to | |||
344 | * calculate the crc. | |||
345 | */ | |||
346 | bool_Bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, | |||
347 | bool_Bool enable, bool_Bool continuous) | |||
348 | { | |||
349 | int i; | |||
350 | struct pipe_ctx *pipe; | |||
351 | struct crc_params param; | |||
352 | struct timing_generator *tg; | |||
353 | ||||
354 | for (i = 0; i < MAX_PIPES6; i++) { | |||
355 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; | |||
356 | if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) | |||
357 | break; | |||
358 | } | |||
359 | /* Stream not found */ | |||
360 | if (i == MAX_PIPES6) | |||
361 | return false0; | |||
362 | ||||
363 | /* Always capture the full frame */ | |||
364 | param.windowa_x_start = 0; | |||
365 | param.windowa_y_start = 0; | |||
366 | param.windowa_x_end = pipe->stream->timing.h_addressable; | |||
367 | param.windowa_y_end = pipe->stream->timing.v_addressable; | |||
368 | param.windowb_x_start = 0; | |||
369 | param.windowb_y_start = 0; | |||
370 | param.windowb_x_end = pipe->stream->timing.h_addressable; | |||
371 | param.windowb_y_end = pipe->stream->timing.v_addressable; | |||
372 | ||||
373 | param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; | |||
374 | param.odm_mode = pipe->next_odm_pipe ? 1:0; | |||
375 | ||||
376 | /* Default to the union of both windows */ | |||
377 | param.selection = UNION_WINDOW_A_B; | |||
378 | param.continuous_mode = continuous; | |||
379 | param.enable = enable; | |||
380 | ||||
381 | tg = pipe->stream_res.tg; | |||
382 | ||||
383 | /* Only call if supported */ | |||
384 | if (tg->funcs->configure_crc) | |||
385 | return tg->funcs->configure_crc(tg, ¶m); | |||
386 | DC_LOG_WARNING("CRC capture not supported.")printk("\0014" "[" "drm" "] " "CRC capture not supported."); | |||
387 | return false0; | |||
388 | } | |||
389 | ||||
390 | /** | |||
391 | * dc_stream_get_crc() - Get CRC values for the given stream. | |||
392 | * @dc: DC object | |||
393 | * @stream: The DC stream state of the stream to get CRCs from. | |||
394 | * @r_cr, g_y, b_cb: CRC values for the three channels are stored here. | |||
395 | * | |||
396 | * dc_stream_configure_crc needs to be called beforehand to enable CRCs. | |||
397 | * Return false if stream is not found, or if CRCs are not enabled. | |||
398 | */ | |||
399 | bool_Bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, | |||
400 | uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) | |||
401 | { | |||
402 | int i; | |||
403 | struct pipe_ctx *pipe; | |||
404 | struct timing_generator *tg; | |||
405 | ||||
406 | for (i = 0; i < MAX_PIPES6; i++) { | |||
407 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; | |||
408 | if (pipe->stream == stream) | |||
409 | break; | |||
410 | } | |||
411 | /* Stream not found */ | |||
412 | if (i == MAX_PIPES6) | |||
413 | return false0; | |||
414 | ||||
415 | tg = pipe->stream_res.tg; | |||
416 | ||||
417 | if (tg->funcs->get_crc) | |||
418 | return tg->funcs->get_crc(tg, r_cr, g_y, b_cb); | |||
419 | DC_LOG_WARNING("CRC capture not supported.")printk("\0014" "[" "drm" "] " "CRC capture not supported."); | |||
420 | return false0; | |||
421 | } | |||
422 | ||||
423 | void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, | |||
424 | enum dc_dynamic_expansion option) | |||
425 | { | |||
426 | /* OPP FMT dyn expansion updates*/ | |||
427 | int i = 0; | |||
428 | struct pipe_ctx *pipe_ctx; | |||
429 | ||||
430 | for (i = 0; i < MAX_PIPES6; i++) { | |||
431 | if (dc->current_state->res_ctx.pipe_ctx[i].stream | |||
432 | == stream) { | |||
433 | pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; | |||
434 | pipe_ctx->stream_res.opp->dyn_expansion = option; | |||
435 | pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( | |||
436 | pipe_ctx->stream_res.opp, | |||
437 | COLOR_SPACE_YCBCR601, | |||
438 | stream->timing.display_color_depth, | |||
439 | stream->signal); | |||
440 | } | |||
441 | } | |||
442 | } | |||
443 | ||||
444 | void dc_stream_set_dither_option(struct dc_stream_state *stream, | |||
445 | enum dc_dither_option option) | |||
446 | { | |||
447 | struct bit_depth_reduction_params params; | |||
448 | struct dc_link *link = stream->link; | |||
449 | struct pipe_ctx *pipes = NULL((void *)0); | |||
450 | int i; | |||
451 | ||||
452 | for (i = 0; i < MAX_PIPES6; i++) { | |||
453 | if (link->dc->current_state->res_ctx.pipe_ctx[i].stream == | |||
454 | stream) { | |||
455 | pipes = &link->dc->current_state->res_ctx.pipe_ctx[i]; | |||
456 | break; | |||
457 | } | |||
458 | } | |||
459 | ||||
460 | if (!pipes) | |||
461 | return; | |||
462 | if (option > DITHER_OPTION_MAX) | |||
463 | return; | |||
464 | ||||
465 | stream->dither_option = option; | |||
466 | ||||
467 | memset(¶ms, 0, sizeof(params))__builtin_memset((¶ms), (0), (sizeof(params))); | |||
468 | resource_build_bit_depth_reduction_params(stream, ¶ms); | |||
469 | stream->bit_depth_params = params; | |||
470 | ||||
471 | if (pipes->plane_res.xfm && | |||
472 | pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) { | |||
473 | pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth( | |||
474 | pipes->plane_res.xfm, | |||
475 | pipes->plane_res.scl_data.lb_params.depth, | |||
476 | &stream->bit_depth_params); | |||
477 | } | |||
478 | ||||
479 | pipes->stream_res.opp->funcs-> | |||
480 | opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms); | |||
481 | } | |||
482 | ||||
483 | bool_Bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) | |||
484 | { | |||
485 | int i = 0; | |||
486 | bool_Bool ret = false0; | |||
487 | struct pipe_ctx *pipes; | |||
488 | ||||
489 | for (i = 0; i < MAX_PIPES6; i++) { | |||
490 | if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { | |||
491 | pipes = &dc->current_state->res_ctx.pipe_ctx[i]; | |||
492 | dc->hwss.program_gamut_remap(pipes); | |||
493 | ret = true1; | |||
494 | } | |||
495 | } | |||
496 | ||||
497 | return ret; | |||
498 | } | |||
499 | ||||
500 | bool_Bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) | |||
501 | { | |||
502 | int i = 0; | |||
503 | bool_Bool ret = false0; | |||
504 | struct pipe_ctx *pipes; | |||
505 | ||||
506 | for (i = 0; i < MAX_PIPES6; i++) { | |||
507 | if (dc->current_state->res_ctx.pipe_ctx[i].stream | |||
508 | == stream) { | |||
509 | ||||
510 | pipes = &dc->current_state->res_ctx.pipe_ctx[i]; | |||
511 | dc->hwss.program_output_csc(dc, | |||
512 | pipes, | |||
513 | stream->output_color_space, | |||
514 | stream->csc_color_matrix.matrix, | |||
515 | pipes->stream_res.opp->inst); | |||
516 | ret = true1; | |||
517 | } | |||
518 | } | |||
519 | ||||
520 | return ret; | |||
521 | } | |||
522 | ||||
523 | void dc_stream_set_static_screen_params(struct dc *dc, | |||
524 | struct dc_stream_state **streams, | |||
525 | int num_streams, | |||
526 | const struct dc_static_screen_params *params) | |||
527 | { | |||
528 | int i = 0; | |||
529 | int j = 0; | |||
530 | struct pipe_ctx *pipes_affected[MAX_PIPES6]; | |||
531 | int num_pipes_affected = 0; | |||
532 | ||||
533 | for (i = 0; i < num_streams; i++) { | |||
534 | struct dc_stream_state *stream = streams[i]; | |||
535 | ||||
536 | for (j = 0; j < MAX_PIPES6; j++) { | |||
537 | if (dc->current_state->res_ctx.pipe_ctx[j].stream | |||
538 | == stream) { | |||
539 | pipes_affected[num_pipes_affected++] = | |||
540 | &dc->current_state->res_ctx.pipe_ctx[j]; | |||
541 | } | |||
542 | } | |||
543 | } | |||
544 | ||||
545 | dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); | |||
546 | } | |||
547 | ||||
/*
 * Tear down what dc_construct() built, in reverse dependency order:
 * current state, links, clock manager, resource pool, the services
 * living inside the context (GPIO, BIOS parser, perf trace), then the
 * context itself and the bandwidth-calc scratch allocations.
 *
 * NOTE(review): dc->ctx is dereferenced unconditionally below — this
 * assumes construction got far enough to set it; confirm for very early
 * failure paths.
 */
static void dc_destruct(struct dc *dc)
{
	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	/* Only destroy the parser dc_construct() created; a
	 * vbios_override supplied via init params is owned by the caller. */
	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	/* kfree(NULL) is a no-op, so these are safe even when
	 * construction failed before the allocations happened. */
	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
	kfree(dc->vm_helper);
	dc->vm_helper = NULL;

}
593 | ||||
594 | static bool_Bool dc_construct_ctx(struct dc *dc, | |||
595 | const struct dc_init_data *init_params) | |||
596 | { | |||
597 | struct dc_context *dc_ctx; | |||
598 | enum dce_version dc_version = DCE_VERSION_UNKNOWN; | |||
599 | ||||
600 | dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL(0x0001 | 0x0004)); | |||
601 | if (!dc_ctx) | |||
602 | return false0; | |||
603 | ||||
604 | dc_ctx->cgs_device = init_params->cgs_device; | |||
605 | dc_ctx->driver_context = init_params->driver; | |||
606 | dc_ctx->dc = dc; | |||
607 | dc_ctx->asic_id = init_params->asic_id; | |||
608 | dc_ctx->dc_sink_id_count = 0; | |||
609 | dc_ctx->dc_stream_id_count = 0; | |||
610 | dc_ctx->dce_environment = init_params->dce_environment; | |||
611 | ||||
612 | /* Create logger */ | |||
613 | ||||
614 | dc_version = resource_parse_asic_id(init_params->asic_id); | |||
615 | dc_ctx->dce_version = dc_version; | |||
616 | ||||
617 | dc_ctx->perf_trace = dc_perf_trace_create(); | |||
618 | if (!dc_ctx->perf_trace) { | |||
619 | ASSERT_CRITICAL(false)do { if (({ int __ret = !!(!(0)); if (__ret) printf("WARNING %s failed at %s:%d\n" , "!(0)", "/usr/src/sys/dev/pci/drm/amd/display/dc/core/amdgpu_dc.c" , 619); __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); | |||
620 | return false0; | |||
621 | } | |||
622 | ||||
623 | dc->ctx = dc_ctx; | |||
624 | ||||
625 | return true1; | |||
626 | } | |||
627 | ||||
/*
 * Build a dc instance: bandwidth-calc scratch allocations, dc_context,
 * BIOS parser, GPIO service, resource pool, clock manager, the initial
 * current_state, and the links. Returns true on success.
 *
 * NOTE(review): the fail path frees nothing here — already-attached
 * allocations stay on 'dc'; presumably the caller unwinds them via
 * dc_destruct(). Confirm against dc_create().
 */
static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
	dc->soc_bounding_box = init_params->soc_bounding_box;
#endif

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		/* Remember ownership so dc_destruct() destroys the parser. */
		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN3_0
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
#endif

	dc->debug.force_ignore_link_settings = init_params->force_ignore_link_settings;

	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:
	return false;
}
765 | ||||
766 | static bool_Bool disable_all_writeback_pipes_for_stream( | |||
767 | const struct dc *dc, | |||
768 | struct dc_stream_state *stream, | |||
769 | struct dc_state *context) | |||
770 | { | |||
771 | int i; | |||
772 | ||||
773 | for (i = 0; i < stream->num_wb_info; i++) | |||
774 | stream->writeback_info[i].wb_enabled = false0; | |||
775 | ||||
776 | return true1; | |||
777 | } | |||
778 | ||||
779 | void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool_Bool lock) | |||
780 | { | |||
781 | int i = 0; | |||
782 | ||||
783 | /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ | |||
784 | if (dc->hwss.interdependent_update_lock) | |||
785 | dc->hwss.interdependent_update_lock(dc, context, lock); | |||
786 | else { | |||
787 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |||
788 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | |||
789 | struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; | |||
790 | ||||
791 | // Copied conditions that were previously in dce110_apply_ctx_for_surface | |||
792 | if (stream == pipe_ctx->stream) { | |||
793 | if (!pipe_ctx->top_pipe && | |||
794 | (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) | |||
795 | dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); | |||
796 | } | |||
797 | } | |||
798 | } | |||
799 | } | |||
800 | ||||
801 | static void disable_dangling_plane(struct dc *dc, struct dc_state *context) | |||
802 | { | |||
803 | int i, j; | |||
804 | struct dc_state *dangling_context = dc_create_state(dc); | |||
805 | struct dc_state *current_ctx; | |||
806 | ||||
807 | if (dangling_context == NULL((void *)0)) | |||
808 | return; | |||
809 | ||||
810 | dc_resource_state_copy_construct(dc->current_state, dangling_context); | |||
811 | ||||
812 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |||
813 | struct dc_stream_state *old_stream = | |||
814 | dc->current_state->res_ctx.pipe_ctx[i].stream; | |||
815 | bool_Bool should_disable = true1; | |||
816 | ||||
817 | for (j = 0; j < context->stream_count; j++) { | |||
818 | if (old_stream == context->streams[j]) { | |||
819 | should_disable = false0; | |||
820 | break; | |||
821 | } | |||
822 | } | |||
823 | if (should_disable && old_stream) { | |||
824 | dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); | |||
825 | disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); | |||
826 | ||||
827 | if (dc->hwss.apply_ctx_for_surface) { | |||
828 | apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true1); | |||
829 | dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); | |||
830 | apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false0); | |||
831 | dc->hwss.post_unlock_program_front_end(dc, dangling_context); | |||
832 | } | |||
833 | if (dc->hwss.program_front_end_for_ctx) { | |||
834 | dc->hwss.interdependent_update_lock(dc, dc->current_state, true1); | |||
835 | dc->hwss.program_front_end_for_ctx(dc, dangling_context); | |||
836 | dc->hwss.interdependent_update_lock(dc, dc->current_state, false0); | |||
837 | dc->hwss.post_unlock_program_front_end(dc, dangling_context); | |||
838 | } | |||
839 | } | |||
840 | } | |||
841 | ||||
842 | current_ctx = dc->current_state; | |||
843 | dc->current_state = dangling_context; | |||
844 | dc_release_state(current_ctx); | |||
845 | } | |||
846 | ||||
847 | static void disable_vbios_mode_if_required( | |||
848 | struct dc *dc, | |||
849 | struct dc_state *context) | |||
850 | { | |||
851 | unsigned int i, j; | |||
852 | ||||
853 | /* check if timing_changed, disable stream*/ | |||
854 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |||
855 | struct dc_stream_state *stream = NULL((void *)0); | |||
856 | struct dc_link *link = NULL((void *)0); | |||
857 | struct pipe_ctx *pipe = NULL((void *)0); | |||
858 | ||||
859 | pipe = &context->res_ctx.pipe_ctx[i]; | |||
860 | stream = pipe->stream; | |||
861 | if (stream == NULL((void *)0)) | |||
862 | continue; | |||
863 | ||||
864 | if (stream->link->local_sink && | |||
865 | stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { | |||
866 | link = stream->link; | |||
867 | } | |||
868 | ||||
869 | if (link != NULL((void *)0)) { | |||
870 | unsigned int enc_inst, tg_inst = 0; | |||
871 | unsigned int pix_clk_100hz; | |||
872 | ||||
873 | enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); | |||
874 | if (enc_inst != ENGINE_ID_UNKNOWN) { | |||
875 | for (j = 0; j < dc->res_pool->stream_enc_count; j++) { | |||
876 | if (dc->res_pool->stream_enc[j]->id == enc_inst) { | |||
877 | tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg( | |||
878 | dc->res_pool->stream_enc[j]); | |||
879 | break; | |||
880 | } | |||
881 | } | |||
882 | ||||
883 | dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( | |||
884 | dc->res_pool->dp_clock_source, | |||
885 | tg_inst, &pix_clk_100hz); | |||
886 | ||||
887 | if (link->link_status.link_active) { | |||
888 | uint32_t requested_pix_clk_100hz = | |||
889 | pipe->stream_res.pix_clk_params.requested_pix_clk_100hz; | |||
890 | ||||
891 | if (pix_clk_100hz != requested_pix_clk_100hz) { | |||
892 | core_link_disable_stream(pipe); | |||
893 | pipe->stream->dpms_off = false0; | |||
894 | } | |||
895 | } | |||
896 | } | |||
897 | } | |||
898 | } | |||
899 | } | |||
900 | ||||
901 | static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) | |||
902 | { | |||
903 | int i; | |||
904 | PERF_TRACE()trace_amdgpu_dc_performance(dc->ctx->perf_trace->read_count , dc->ctx->perf_trace->write_count, &dc->ctx-> perf_trace->last_entry_read, &dc->ctx->perf_trace ->last_entry_write, __func__, 904); | |||
905 | for (i = 0; i < MAX_PIPES6; i++) { | |||
906 | int count = 0; | |||
907 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; | |||
908 | ||||
909 | if (!pipe->plane_state) | |||
910 | continue; | |||
911 | ||||
912 | /* Timeout 100 ms */ | |||
913 | while (count < 100000) { | |||
914 | /* Must set to false to start with, due to OR in update function */ | |||
915 | pipe->plane_state->status.is_flip_pending = false0; | |||
916 | dc->hwss.update_pending_status(pipe); | |||
917 | if (!pipe->plane_state->status.is_flip_pending) | |||
918 | break; | |||
919 | udelay(1); | |||
920 | count++; | |||
921 | } | |||
922 | ASSERT(!pipe->plane_state->status.is_flip_pending)do { if (({ static int __warned; int __ret = !!(!(!pipe->plane_state ->status.is_flip_pending)); if (__ret && !__warned ) { printf("WARNING %s failed at %s:%d\n", "!(!pipe->plane_state->status.is_flip_pending)" , "/usr/src/sys/dev/pci/drm/amd/display/dc/core/amdgpu_dc.c", 922); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); | |||
923 | } | |||
924 | PERF_TRACE()trace_amdgpu_dc_performance(dc->ctx->perf_trace->read_count , dc->ctx->perf_trace->write_count, &dc->ctx-> perf_trace->last_entry_read, &dc->ctx->perf_trace ->last_entry_write, __func__, 924); | |||
925 | } | |||
926 | ||||
927 | /******************************************************************************* | |||
928 | * Public functions | |||
929 | ******************************************************************************/ | |||
930 | ||||
931 | struct dc *dc_create(const struct dc_init_data *init_params) | |||
932 | { | |||
933 | struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL(0x0001 | 0x0004)); | |||
934 | unsigned int full_pipe_count; | |||
935 | ||||
936 | if (NULL((void *)0) == dc) | |||
937 | goto alloc_fail; | |||
938 | ||||
939 | if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { | |||
940 | if (false0 == dc_construct_ctx(dc, init_params)) { | |||
941 | dc_destruct(dc); | |||
942 | goto construct_fail; | |||
943 | } | |||
944 | } else { | |||
945 | if (false0 == dc_construct(dc, init_params)) { | |||
946 | dc_destruct(dc); | |||
947 | goto construct_fail; | |||
948 | } | |||
949 | ||||
950 | full_pipe_count = dc->res_pool->pipe_count; | |||
951 | if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE-1) | |||
952 | full_pipe_count--; | |||
953 | dc->caps.max_streams = min((((full_pipe_count)<(dc->res_pool->stream_enc_count) )?(full_pipe_count):(dc->res_pool->stream_enc_count)) | |||
954 | full_pipe_count,(((full_pipe_count)<(dc->res_pool->stream_enc_count) )?(full_pipe_count):(dc->res_pool->stream_enc_count)) | |||
955 | dc->res_pool->stream_enc_count)(((full_pipe_count)<(dc->res_pool->stream_enc_count) )?(full_pipe_count):(dc->res_pool->stream_enc_count)); | |||
956 | ||||
957 | dc->optimize_seamless_boot_streams = 0; | |||
958 | dc->caps.max_links = dc->link_count; | |||
959 | dc->caps.max_audios = dc->res_pool->audio_count; | |||
960 | dc->caps.linear_pitch_alignment = 64; | |||
961 | ||||
962 | dc->caps.max_dp_protocol_version = DP_VERSION_1_4; | |||
963 | ||||
964 | if (dc->res_pool->dmcu != NULL((void *)0)) | |||
965 | dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; | |||
966 | } | |||
967 | ||||
968 | /* Populate versioning information */ | |||
969 | dc->versions.dc_ver = DC_VER"3.2.104"; | |||
970 | ||||
971 | dc->build_id = DC_BUILD_ID; | |||
972 | ||||
973 | DC_LOG_DC("Display Core initialized\n")__drm_dbg(DRM_UT_KMS, "Display Core initialized\n"); | |||
974 | ||||
975 | ||||
976 | ||||
977 | return dc; | |||
978 | ||||
979 | construct_fail: | |||
980 | kfree(dc); | |||
981 | ||||
982 | alloc_fail: | |||
983 | return NULL((void *)0); | |||
984 | } | |||
985 | ||||
986 | void dc_hardware_init(struct dc *dc) | |||
987 | { | |||
988 | if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) | |||
989 | dc->hwss.init_hw(dc); | |||
990 | } | |||
991 | ||||
/* Install DM-provided callbacks; currently only the HDCP PSP hook. */
void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}
999 | ||||
/* Clear the DM-provided callbacks installed by dc_init_callbacks(). */
void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}
1006 | ||||
1007 | void dc_destroy(struct dc **dc) | |||
1008 | { | |||
1009 | dc_destruct(*dc); | |||
1010 | kfree(*dc); | |||
1011 | *dc = NULL((void *)0); | |||
1012 | } | |||
1013 | ||||
1014 | static void enable_timing_multisync( | |||
1015 | struct dc *dc, | |||
1016 | struct dc_state *ctx) | |||
1017 | { | |||
1018 | int i = 0, multisync_count = 0; | |||
1019 | int pipe_count = dc->res_pool->pipe_count; | |||
1020 | struct pipe_ctx *multisync_pipes[MAX_PIPES6] = { NULL((void *)0) }; | |||
1021 | ||||
1022 | for (i = 0; i < pipe_count; i++) { | |||
1023 | if (!ctx->res_ctx.pipe_ctx[i].stream || | |||
1024 | !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) | |||
1025 | continue; | |||
1026 | if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source) | |||
1027 | continue; | |||
1028 | multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i]; | |||
1029 | multisync_count++; | |||
1030 | } | |||
1031 | ||||
1032 | if (multisync_count > 0) { | |||
1033 | dc->hwss.enable_per_frame_crtc_position_reset( | |||
1034 | dc, multisync_count, multisync_pipes); | |||
1035 | } | |||
1036 | } | |||
1037 | ||||
1038 | static void program_timing_sync( | |||
1039 | struct dc *dc, | |||
1040 | struct dc_state *ctx) | |||
1041 | { | |||
1042 | int i, j, k; | |||
1043 | int group_index = 0; | |||
1044 | int num_group = 0; | |||
1045 | int pipe_count = dc->res_pool->pipe_count; | |||
1046 | struct pipe_ctx *unsynced_pipes[MAX_PIPES6] = { NULL((void *)0) }; | |||
1047 | ||||
1048 | for (i = 0; i < pipe_count; i++) { | |||
1049 | if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe) | |||
1050 | continue; | |||
1051 | ||||
1052 | unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i]; | |||
1053 | } | |||
1054 | ||||
1055 | for (i = 0; i < pipe_count; i++) { | |||
1056 | int group_size = 1; | |||
1057 | struct pipe_ctx *pipe_set[MAX_PIPES6]; | |||
1058 | ||||
1059 | if (!unsynced_pipes[i]) | |||
1060 | continue; | |||
1061 | ||||
1062 | pipe_set[0] = unsynced_pipes[i]; | |||
1063 | unsynced_pipes[i] = NULL((void *)0); | |||
1064 | ||||
1065 | /* Add tg to the set, search rest of the tg's for ones with | |||
1066 | * same timing, add all tgs with same timing to the group | |||
1067 | */ | |||
1068 | for (j = i + 1; j < pipe_count; j++) { | |||
1069 | if (!unsynced_pipes[j]) | |||
1070 | continue; | |||
1071 | ||||
1072 | if (resource_are_streams_timing_synchronizable( | |||
1073 | unsynced_pipes[j]->stream, | |||
1074 | pipe_set[0]->stream)) { | |||
1075 | pipe_set[group_size] = unsynced_pipes[j]; | |||
1076 | unsynced_pipes[j] = NULL((void *)0); | |||
1077 | group_size++; | |||
1078 | } | |||
1079 | } | |||
1080 | ||||
1081 | /* set first unblanked pipe as master */ | |||
1082 | for (j = 0; j < group_size; j++) { | |||
1083 | bool_Bool is_blanked; | |||
1084 | ||||
1085 | if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) | |||
1086 | is_blanked = | |||
1087 | pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); | |||
1088 | else | |||
1089 | is_blanked = | |||
1090 | pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); | |||
1091 | if (!is_blanked) { | |||
1092 | if (j == 0) | |||
1093 | break; | |||
1094 | ||||
1095 | swap(pipe_set[0], pipe_set[j])do { __typeof(pipe_set[0]) __tmp = (pipe_set[0]); (pipe_set[0 ]) = (pipe_set[j]); (pipe_set[j]) = __tmp; } while(0); | |||
1096 | break; | |||
1097 | } | |||
1098 | } | |||
1099 | ||||
1100 | ||||
1101 | for (k = 0; k < group_size; k++) { | |||
1102 | struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); | |||
1103 | ||||
1104 | status->timing_sync_info.group_id = num_group; | |||
1105 | status->timing_sync_info.group_size = group_size; | |||
1106 | if (k == 0) | |||
1107 | status->timing_sync_info.master = true1; | |||
1108 | else | |||
1109 | status->timing_sync_info.master = false0; | |||
1110 | ||||
1111 | } | |||
1112 | /* remove any other unblanked pipes as they have already been synced */ | |||
1113 | for (j = j + 1; j < group_size; j++) { | |||
1114 | bool_Bool is_blanked; | |||
1115 | ||||
1116 | if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) | |||
1117 | is_blanked = | |||
1118 | pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); | |||
1119 | else | |||
1120 | is_blanked = | |||
1121 | pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); | |||
1122 | if (!is_blanked) { | |||
1123 | group_size--; | |||
1124 | pipe_set[j] = pipe_set[group_size]; | |||
1125 | j--; | |||
1126 | } | |||
1127 | } | |||
1128 | ||||
1129 | if (group_size > 1) { | |||
1130 | dc->hwss.enable_timing_synchronization( | |||
1131 | dc, group_index, group_size, pipe_set); | |||
1132 | group_index++; | |||
1133 | } | |||
1134 | num_group++; | |||
1135 | } | |||
1136 | } | |||
1137 | ||||
1138 | static bool_Bool context_changed( | |||
1139 | struct dc *dc, | |||
1140 | struct dc_state *context) | |||
1141 | { | |||
1142 | uint8_t i; | |||
1143 | ||||
1144 | if (context->stream_count != dc->current_state->stream_count) | |||
1145 | return true1; | |||
1146 | ||||
1147 | for (i = 0; i < dc->current_state->stream_count; i++) { | |||
1148 | if (dc->current_state->streams[i] != context->streams[i]) | |||
1149 | return true1; | |||
1150 | } | |||
1151 | ||||
1152 | return false0; | |||
1153 | } | |||
1154 | ||||
1155 | bool_Bool dc_validate_seamless_boot_timing(const struct dc *dc, | |||
1156 | const struct dc_sink *sink, | |||
1157 | struct dc_crtc_timing *crtc_timing) | |||
1158 | { | |||
1159 | struct timing_generator *tg; | |||
1160 | struct stream_encoder *se = NULL((void *)0); | |||
1161 | ||||
1162 | struct dc_crtc_timing hw_crtc_timing = {0}; | |||
1163 | ||||
1164 | struct dc_link *link = sink->link; | |||
1165 | unsigned int i, enc_inst, tg_inst = 0; | |||
1166 | ||||
1167 | // Seamless port only support single DP and EDP so far | |||
1168 | if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT && | |||
1169 | sink->sink_signal != SIGNAL_TYPE_EDP) | |||
1170 | return false0; | |||
1171 | ||||
1172 | /* Check for enabled DIG to identify enabled display */ | |||
1173 | if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) | |||
1174 | return false0; | |||
1175 | ||||
1176 | enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); | |||
1177 | ||||
1178 | if (enc_inst == ENGINE_ID_UNKNOWN) | |||
1179 | return false0; | |||
1180 | ||||
1181 | for (i = 0; i < dc->res_pool->stream_enc_count; i++) { | |||
1182 | if (dc->res_pool->stream_enc[i]->id == enc_inst) { | |||
1183 | ||||
1184 | se = dc->res_pool->stream_enc[i]; | |||
1185 | ||||
1186 | tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( | |||
1187 | dc->res_pool->stream_enc[i]); | |||
1188 | break; | |||
1189 | } | |||
1190 | } | |||
1191 | ||||
1192 | // tg_inst not found | |||
1193 | if (i == dc->res_pool->stream_enc_count) | |||
1194 | return false0; | |||
1195 | ||||
1196 | if (tg_inst >= dc->res_pool->timing_generator_count) | |||
1197 | return false0; | |||
1198 | ||||
1199 | tg = dc->res_pool->timing_generators[tg_inst]; | |||
1200 | ||||
1201 | if (!tg->funcs->get_hw_timing) | |||
1202 | return false0; | |||
1203 | ||||
1204 | if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) | |||
1205 | return false0; | |||
1206 | ||||
1207 | if (crtc_timing->h_total != hw_crtc_timing.h_total) | |||
1208 | return false0; | |||
1209 | ||||
1210 | if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) | |||
1211 | return false0; | |||
1212 | ||||
1213 | if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) | |||
1214 | return false0; | |||
1215 | ||||
1216 | if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) | |||
1217 | return false0; | |||
1218 | ||||
1219 | if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) | |||
1220 | return false0; | |||
1221 | ||||
1222 | if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) | |||
1223 | return false0; | |||
1224 | ||||
1225 | if (crtc_timing->v_total != hw_crtc_timing.v_total) | |||
1226 | return false0; | |||
1227 | ||||
1228 | if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) | |||
1229 | return false0; | |||
1230 | ||||
1231 | if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) | |||
1232 | return false0; | |||
1233 | ||||
1234 | if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) | |||
1235 | return false0; | |||
1236 | ||||
1237 | if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) | |||
1238 | return false0; | |||
1239 | ||||
1240 | if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) | |||
1241 | return false0; | |||
1242 | ||||
1243 | if (dc_is_dp_signal(link->connector_signal)) { | |||
1244 | unsigned int pix_clk_100hz; | |||
1245 | ||||
1246 | dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( | |||
1247 | dc->res_pool->dp_clock_source, | |||
1248 | tg_inst, &pix_clk_100hz); | |||
1249 | ||||
1250 | if (crtc_timing->pix_clk_100hz != pix_clk_100hz) | |||
1251 | return false0; | |||
1252 | ||||
1253 | if (!se->funcs->dp_get_pixel_format) | |||
1254 | return false0; | |||
1255 | ||||
1256 | if (!se->funcs->dp_get_pixel_format( | |||
1257 | se, | |||
1258 | &hw_crtc_timing.pixel_encoding, | |||
1259 | &hw_crtc_timing.display_color_depth)) | |||
1260 | return false0; | |||
1261 | ||||
1262 | if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) | |||
1263 | return false0; | |||
1264 | ||||
1265 | if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) | |||
1266 | return false0; | |||
1267 | } | |||
1268 | ||||
1269 | return true1; | |||
1270 | } | |||
1271 | ||||
1272 | bool_Bool dc_enable_stereo( | |||
1273 | struct dc *dc, | |||
1274 | struct dc_state *context, | |||
1275 | struct dc_stream_state *streams[], | |||
1276 | uint8_t stream_count) | |||
1277 | { | |||
1278 | bool_Bool ret = true1; | |||
1279 | int i, j; | |||
1280 | struct pipe_ctx *pipe; | |||
1281 | ||||
1282 | for (i = 0; i < MAX_PIPES6; i++) { | |||
1283 | if (context != NULL((void *)0)) | |||
1284 | pipe = &context->res_ctx.pipe_ctx[i]; | |||
1285 | else | |||
1286 | pipe = &dc->current_state->res_ctx.pipe_ctx[i]; | |||
1287 | for (j = 0 ; pipe && j < stream_count; j++) { | |||
1288 | if (streams[j] && streams[j] == pipe->stream && | |||
1289 | dc->hwss.setup_stereo) | |||
1290 | dc->hwss.setup_stereo(pipe, dc); | |||
1291 | } | |||
1292 | } | |||
1293 | ||||
1294 | return ret; | |||
1295 | } | |||
1296 | ||||
1297 | void dc_trigger_sync(struct dc *dc, struct dc_state *context) | |||
1298 | { | |||
1299 | if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { | |||
1300 | enable_timing_multisync(dc, context); | |||
1301 | program_timing_sync(dc, context); | |||
1302 | } | |||
1303 | } | |||
1304 | ||||
1305 | static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) | |||
1306 | { | |||
1307 | int i; | |||
1308 | unsigned int stream_mask = 0; | |||
1309 | ||||
1310 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |||
1311 | if (context->res_ctx.pipe_ctx[i].stream) | |||
1312 | stream_mask |= 1 << i; | |||
1313 | } | |||
1314 | ||||
1315 | return stream_mask; | |||
1316 | } | |||
1317 | ||||
1318 | /* | |||
1319 | * Applies given context to HW and copy it into current context. | |||
1320 | * It's up to the user to release the src context afterwards. | |||
1321 | */ | |||
1322 | static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) | |||
1323 | { | |||
1324 | struct dc_bios *dcb = dc->ctx->dc_bios; | |||
1325 | enum dc_status result = DC_ERROR_UNEXPECTED; | |||
1326 | struct pipe_ctx *pipe; | |||
1327 | int i, k, l; | |||
1328 | struct dc_stream_state *dc_streams[MAX_STREAMS6] = {0}; | |||
1329 | ||||
1330 | #if defined(CONFIG_DRM_AMD_DC_DCN3_01) | |||
1331 | dc_allow_idle_optimizations(dc, false0); | |||
1332 | #endif | |||
1333 | ||||
1334 | for (i = 0; i < context->stream_count; i++) | |||
1335 | dc_streams[i] = context->streams[i]; | |||
1336 | ||||
1337 | if (!dcb->funcs->is_accelerated_mode(dcb)) { | |||
1338 | disable_vbios_mode_if_required(dc, context); | |||
1339 | dc->hwss.enable_accelerated_mode(dc, context); | |||
1340 | } | |||
1341 | ||||
1342 | for (i = 0; i < context->stream_count; i++) | |||
1343 | if (context->streams[i]->apply_seamless_boot_optimization) | |||
1344 | dc->optimize_seamless_boot_streams++; | |||
1345 | ||||
1346 | if (context->stream_count > dc->optimize_seamless_boot_streams || | |||
1347 | context->stream_count == 0) | |||
1348 | dc->hwss.prepare_bandwidth(dc, context); | |||
1349 | ||||
1350 | disable_dangling_plane(dc, context); | |||
1351 | /* re-program planes for existing stream, in case we need to | |||
1352 | * free up plane resource for later use | |||
1353 | */ | |||
1354 | if (dc->hwss.apply_ctx_for_surface) { | |||
1355 | for (i = 0; i < context->stream_count; i++) { | |||
1356 | if (context->streams[i]->mode_changed) | |||
1357 | continue; | |||
1358 | apply_ctx_interdependent_lock(dc, context, context->streams[i], true1); | |||
1359 | dc->hwss.apply_ctx_for_surface( | |||
1360 | dc, context->streams[i], | |||
1361 | context->stream_status[i].plane_count, | |||
1362 | context); /* use new pipe config in new context */ | |||
1363 | apply_ctx_interdependent_lock(dc, context, context->streams[i], false0); | |||
1364 | dc->hwss.post_unlock_program_front_end(dc, context); | |||
1365 | } | |||
1366 | } | |||
1367 | ||||
1368 | /* Program hardware */ | |||
1369 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |||
1370 | pipe = &context->res_ctx.pipe_ctx[i]; | |||
1371 | dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); | |||
1372 | } | |||
1373 | ||||
1374 | result = dc->hwss.apply_ctx_to_hw(dc, context); | |||
1375 | ||||
1376 | if (result != DC_OK) | |||
1377 | return result; | |||
1378 | ||||
1379 | dc_trigger_sync(dc, context); | |||
1380 | ||||
1381 | /* Program all planes within new context*/ | |||
1382 | if (dc->hwss.program_front_end_for_ctx) { | |||
1383 | dc->hwss.interdependent_update_lock(dc, context, true1); | |||
1384 | dc->hwss.program_front_end_for_ctx(dc, context); | |||
1385 | dc->hwss.interdependent_update_lock(dc, context, false0); | |||
1386 | dc->hwss.post_unlock_program_front_end(dc, context); | |||
1387 | } | |||
1388 | for (i = 0; i < context->stream_count; i++) { | |||
1389 | const struct dc_link *link = context->streams[i]->link; | |||
1390 | ||||
1391 | if (!context->streams[i]->mode_changed) | |||
1392 | continue; | |||
1393 | ||||
1394 | if (dc->hwss.apply_ctx_for_surface) { | |||
1395 | apply_ctx_interdependent_lock(dc, context, context->streams[i], true1); | |||
1396 | dc->hwss.apply_ctx_for_surface( | |||
1397 | dc, context->streams[i], | |||
1398 | context->stream_status[i].plane_count, | |||
1399 | context); | |||
1400 | apply_ctx_interdependent_lock(dc, context, context->streams[i], false0); | |||
1401 | dc->hwss.post_unlock_program_front_end(dc, context); | |||
1402 | } | |||
1403 | ||||
1404 | /* | |||
1405 | * enable stereo | |||
1406 | * TODO rework dc_enable_stereo call to work with validation sets? | |||
1407 | */ | |||
1408 | for (k = 0; k < MAX_PIPES6; k++) { | |||
1409 | pipe = &context->res_ctx.pipe_ctx[k]; | |||
1410 | ||||
1411 | for (l = 0 ; pipe && l < context->stream_count; l++) { | |||
1412 | if (context->streams[l] && | |||
1413 | context->streams[l] == pipe->stream && | |||
1414 | dc->hwss.setup_stereo) | |||
1415 | dc->hwss.setup_stereo(pipe, dc); | |||
1416 | } | |||
1417 | } | |||
1418 | ||||
1419 | CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",do { (void)(link); __drm_dbg(DRM_UT_KMS, "{%dx%d, %dx%d@%dKhz}" , context->streams[i]->timing.h_addressable, context-> streams[i]->timing.v_addressable, context->streams[i]-> timing.h_total, context->streams[i]->timing.v_total, context ->streams[i]->timing.pix_clk_100hz / 10); } while (0) | |||
1420 | context->streams[i]->timing.h_addressable,do { (void)(link); __drm_dbg(DRM_UT_KMS, "{%dx%d, %dx%d@%dKhz}" , context->streams[i]->timing.h_addressable, context-> streams[i]->timing.v_addressable, context->streams[i]-> timing.h_total, context->streams[i]->timing.v_total, context ->streams[i]->timing.pix_clk_100hz / 10); } while (0) | |||
1421 | context->streams[i]->timing.v_addressable,do { (void)(link); __drm_dbg(DRM_UT_KMS, "{%dx%d, %dx%d@%dKhz}" , context->streams[i]->timing.h_addressable, context-> streams[i]->timing.v_addressable, context->streams[i]-> timing.h_total, context->streams[i]->timing.v_total, context ->streams[i]->timing.pix_clk_100hz / 10); } while (0) | |||
1422 | context->streams[i]->timing.h_total,do { (void)(link); __drm_dbg(DRM_UT_KMS, "{%dx%d, %dx%d@%dKhz}" , context->streams[i]->timing.h_addressable, context-> streams[i]->timing.v_addressable, context->streams[i]-> timing.h_total, context->streams[i]->timing.v_total, context ->streams[i]->timing.pix_clk_100hz / 10); } while (0) | |||
1423 | context->streams[i]->timing.v_total,do { (void)(link); __drm_dbg(DRM_UT_KMS, "{%dx%d, %dx%d@%dKhz}" , context->streams[i]->timing.h_addressable, context-> streams[i]->timing.v_addressable, context->streams[i]-> timing.h_total, context->streams[i]->timing.v_total, context ->streams[i]->timing.pix_clk_100hz / 10); } while (0) | |||
1424 | context->streams[i]->timing.pix_clk_100hz / 10)do { (void)(link); __drm_dbg(DRM_UT_KMS, "{%dx%d, %dx%d@%dKhz}" , context->streams[i]->timing.h_addressable, context-> streams[i]->timing.v_addressable, context->streams[i]-> timing.h_total, context->streams[i]->timing.v_total, context ->streams[i]->timing.pix_clk_100hz / 10); } while (0); | |||
1425 | } | |||
1426 | ||||
1427 | dc_enable_stereo(dc, context, dc_streams, context->stream_count); | |||
1428 | ||||
1429 | if (context->stream_count > dc->optimize_seamless_boot_streams || | |||
1430 | context->stream_count == 0) { | |||
1431 | /* Must wait for no flips to be pending before doing optimize bw */ | |||
1432 | wait_for_no_pipes_pending(dc, context); | |||
1433 | /* pplib is notified if disp_num changed */ | |||
1434 | dc->hwss.optimize_bandwidth(dc, context); | |||
1435 | } | |||
1436 | ||||
1437 | context->stream_mask = get_stream_mask(dc, context); | |||
1438 | ||||
1439 | if (context->stream_mask != dc->current_state->stream_mask) | |||
1440 | dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); | |||
1441 | ||||
1442 | for (i = 0; i < context->stream_count; i++) | |||
1443 | context->streams[i]->mode_changed = false0; | |||
1444 | ||||
1445 | dc_release_state(dc->current_state); | |||
1446 | ||||
1447 | dc->current_state = context; | |||
1448 | ||||
1449 | dc_retain_state(dc->current_state); | |||
1450 | ||||
1451 | return result; | |||
1452 | } | |||
1453 | ||||
1454 | bool_Bool dc_commit_state(struct dc *dc, struct dc_state *context) | |||
1455 | { | |||
1456 | enum dc_status result = DC_ERROR_UNEXPECTED; | |||
1457 | int i; | |||
1458 | ||||
1459 | if (false0 == context_changed(dc, context)) | |||
1460 | return DC_OK; | |||
1461 | ||||
1462 | DC_LOG_DC("%s: %d streams\n",__drm_dbg(DRM_UT_KMS, "%s: %d streams\n", __func__, context-> stream_count) | |||
1463 | __func__, context->stream_count)__drm_dbg(DRM_UT_KMS, "%s: %d streams\n", __func__, context-> stream_count); | |||
1464 | ||||
1465 | for (i = 0; i < context->stream_count; i++) { | |||
1466 | struct dc_stream_state *stream = context->streams[i]; | |||
1467 | ||||
1468 | dc_stream_log(dc, stream); | |||
1469 | } | |||
1470 | ||||
1471 | result = dc_commit_state_no_check(dc, context); | |||
1472 | ||||
1473 | return (result == DC_OK); | |||
1474 | } | |||
1475 | ||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_01)
/*
 * Acquire (@acquire true) or release a post-blend MPC 3D LUT and shaper
 * pair. On acquire the LUT is bound to the MPCC feeding @stream, located
 * by scanning the current state's pipes; release needs no pipe lookup.
 * Returns true when the pool callback succeeds.
 */
bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	bool found_pipe_idx = false;
	bool ret = false;
	int mpcc_id = 0;
	int pipe_idx;

	if (pool && res_ctx) {
		if (acquire) {
			/* find the pipe (and thus the MPCC) driving this stream */
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else {
			/* for release, pipe_idx is not required */
			found_pipe_idx = true;
		}

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}
#endif
1513 | static bool_Bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) | |||
1514 | { | |||
1515 | int i; | |||
1516 | struct pipe_ctx *pipe; | |||
1517 | ||||
1518 | for (i = 0; i < MAX_PIPES6; i++) { | |||
1519 | pipe = &context->res_ctx.pipe_ctx[i]; | |||
1520 | ||||
1521 | if (!pipe->plane_state) | |||
1522 | continue; | |||
1523 | ||||
1524 | /* Must set to false to start with, due to OR in update function */ | |||
1525 | pipe->plane_state->status.is_flip_pending = false0; | |||
1526 | dc->hwss.update_pending_status(pipe); | |||
1527 | if (pipe->plane_state->status.is_flip_pending) | |||
1528 | return true1; | |||
1529 | } | |||
1530 | return false0; | |||
1531 | } | |||
1532 | ||||
1533 | bool_Bool dc_post_update_surfaces_to_stream(struct dc *dc) | |||
1534 | { | |||
1535 | int i; | |||
1536 | struct dc_state *context = dc->current_state; | |||
1537 | ||||
1538 | if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0) | |||
1539 | return true1; | |||
1540 | ||||
1541 | post_surface_trace(dc); | |||
1542 | ||||
1543 | if (is_flip_pending_in_pipes(dc, context)) | |||
1544 | return true1; | |||
1545 | ||||
1546 | for (i = 0; i < dc->res_pool->pipe_count; i++) | |||
1547 | if (context->res_ctx.pipe_ctx[i].stream == NULL((void *)0) || | |||
1548 | context->res_ctx.pipe_ctx[i].plane_state == NULL((void *)0)) { | |||
1549 | context->res_ctx.pipe_ctx[i].pipe_idx = i; | |||
1550 | dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); | |||
1551 | } | |||
1552 | ||||
1553 | dc->hwss.optimize_bandwidth(dc, context); | |||
1554 | ||||
1555 | dc->optimized_required = false0; | |||
1556 | dc->wm_optimized_required = false0; | |||
1557 | ||||
1558 | return true1; | |||
1559 | } | |||
1560 | ||||
/*
 * Initialize a freshly allocated dc_state.
 *
 * Each context must have their own instance of VBA and in order to
 * initialize and obtain IP and SOC the base DML instance from DC is
 * initially copied into every context.
 */
static void init_state(struct dc *dc, struct dc_state *context)
{
#ifdef CONFIG_DRM_AMD_DC_DCN
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif
}
1571 | ||||
1572 | struct dc_state *dc_create_state(struct dc *dc) | |||
1573 | { | |||
1574 | struct dc_state *context = kvzalloc(sizeof(struct dc_state), | |||
1575 | GFP_KERNEL(0x0001 | 0x0004)); | |||
1576 | ||||
1577 | if (!context) | |||
1578 | return NULL((void *)0); | |||
1579 | ||||
1580 | init_state(dc, context); | |||
1581 | ||||
1582 | kref_init(&context->refcount); | |||
1583 | ||||
1584 | return context; | |||
1585 | } | |||
1586 | ||||
1587 | struct dc_state *dc_copy_state(struct dc_state *src_ctx) | |||
1588 | { | |||
1589 | int i, j; | |||
1590 | struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL(0x0001 | 0x0004)); | |||
1591 | ||||
1592 | if (!new_ctx) | |||
1593 | return NULL((void *)0); | |||
1594 | memcpy(new_ctx, src_ctx, sizeof(struct dc_state))__builtin_memcpy((new_ctx), (src_ctx), (sizeof(struct dc_state ))); | |||
1595 | ||||
1596 | for (i = 0; i < MAX_PIPES6; i++) { | |||
1597 | struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; | |||
1598 | ||||
1599 | if (cur_pipe->top_pipe) | |||
1600 | cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; | |||
1601 | ||||
1602 | if (cur_pipe->bottom_pipe) | |||
1603 | cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; | |||
1604 | ||||
1605 | if (cur_pipe->prev_odm_pipe) | |||
1606 | cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; | |||
1607 | ||||
1608 | if (cur_pipe->next_odm_pipe) | |||
1609 | cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; | |||
1610 | ||||
1611 | } | |||
1612 | ||||
1613 | for (i = 0; i < new_ctx->stream_count; i++) { | |||
1614 | dc_stream_retain(new_ctx->streams[i]); | |||
1615 | for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) | |||
1616 | dc_plane_state_retain( | |||
1617 | new_ctx->stream_status[i].plane_states[j]); | |||
1618 | } | |||
1619 | ||||
1620 | kref_init(&new_ctx->refcount); | |||
1621 | ||||
1622 | return new_ctx; | |||
1623 | } | |||
1624 | ||||
/* Take an additional reference on @context; pair with dc_release_state(). */
void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}
1629 | ||||
/*
 * kref release callback: tear down the resource state and free the
 * dc_state once the last reference is dropped.
 */
static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kvfree(context);
}
1636 | ||||
/* Drop a reference on @context; frees it via dc_state_free() at zero. */
void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
1641 | ||||
1642 | bool_Bool dc_set_generic_gpio_for_stereo(bool_Bool enable, | |||
1643 | struct gpio_service *gpio_service) | |||
1644 | { | |||
1645 | enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; | |||
1646 | struct gpio_pin_info pin_info; | |||
1647 | struct gpio *generic; | |||
1648 | struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), | |||
1649 | GFP_KERNEL(0x0001 | 0x0004)); | |||
1650 | ||||
1651 | if (!config) | |||
1652 | return false0; | |||
1653 | pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); | |||
1654 | ||||
1655 | if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { | |||
1656 | kfree(config); | |||
1657 | return false0; | |||
1658 | } else { | |||
1659 | generic = dal_gpio_service_create_generic_mux( | |||
1660 | gpio_service, | |||
1661 | pin_info.offset, | |||
1662 | pin_info.mask); | |||
1663 | } | |||
1664 | ||||
1665 | if (!generic) { | |||
1666 | kfree(config); | |||
1667 | return false0; | |||
1668 | } | |||
1669 | ||||
1670 | gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); | |||
1671 | ||||
1672 | config->enable_output_from_mux = enable; | |||
1673 | config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; | |||
1674 | ||||
1675 | if (gpio_result == GPIO_RESULT_OK) | |||
1676 | gpio_result = dal_mux_setup_config(generic, config); | |||
1677 | ||||
1678 | if (gpio_result == GPIO_RESULT_OK) { | |||
1679 | dal_gpio_close(generic); | |||
1680 | dal_gpio_destroy_generic_mux(&generic); | |||
1681 | kfree(config); | |||
1682 | return true1; | |||
1683 | } else { | |||
1684 | dal_gpio_close(generic); | |||
1685 | dal_gpio_destroy_generic_mux(&generic); | |||
1686 | kfree(config); | |||
1687 | return false0; | |||
1688 | } | |||
1689 | } | |||
1690 | ||||
1691 | static bool_Bool is_surface_in_context( | |||
1692 | const struct dc_state *context, | |||
1693 | const struct dc_plane_state *plane_state) | |||
1694 | { | |||
1695 | int j; | |||
1696 | ||||
1697 | for (j = 0; j < MAX_PIPES6; j++) { | |||
1698 | const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; | |||
1699 | ||||
1700 | if (plane_state == pipe_ctx->plane_state) { | |||
1701 | return true1; | |||
1702 | } | |||
1703 | } | |||
1704 | ||||
1705 | return false0; | |||
1706 | } | |||
1707 | ||||
/*
 * Classify a plane_info update for one surface.
 *
 * Compares each field of u->plane_info against the current values on
 * u->surface, records the corresponding bit in the surface's update_flags,
 * and elevates the returned update type:
 *  - UPDATE_TYPE_MED for changes handled without new mode/bandwidth math
 *    (color space, mirroring, alpha, DCC, pitch);
 *  - UPDATE_TYPE_FULL for changes that require DML/bandwidth revalidation
 *    (rotation, pixel format, stereo format, bytes-per-pixel, swizzle).
 *
 * Returns UPDATE_TYPE_FAST when there is no plane_info or nothing changed.
 * Note: only sets flag bits — it never clears them (caller resets raw).
 */
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	/* Any DCC parameter change invalidates the compression metadata. */
	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}


	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: below are HW dependent, we should add a hook to
		 * DCE/N resource and validated there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}
1794 | ||||
/*
 * Classify a scaling_info update for one surface.
 *
 * Changes to clip/dst/src rectangle sizes or scaling quality set
 * scaling_change; shrinking dst below both its old size and the source
 * size, or growing src, additionally flags bandwidth/clock work. Pure
 * position moves (x/y only) are flagged as position_change.
 *
 * Returns FULL for clock/bandwidth/scaling changes, MED for position-only
 * changes, FAST otherwise (including when no scaling_info is supplied).
 */
static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
				u->surface->scaling_quality.integer_scaling
			) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	/* Pure moves of any of the three rectangles. */
	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.scaling_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}
1848 | ||||
1849 | static enum surface_update_type det_surface_update(const struct dc *dc, | |||
1850 | const struct dc_surface_update *u) | |||
1851 | { | |||
1852 | const struct dc_state *context = dc->current_state; | |||
1853 | enum surface_update_type type; | |||
1854 | enum surface_update_type overall_type = UPDATE_TYPE_FAST; | |||
1855 | union surface_update_flags *update_flags = &u->surface->update_flags; | |||
1856 | ||||
1857 | if (u->flip_addr) | |||
1858 | update_flags->bits.addr_update = 1; | |||
1859 | ||||
1860 | if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { | |||
1861 | update_flags->raw = 0xFFFFFFFF; | |||
1862 | return UPDATE_TYPE_FULL; | |||
1863 | } | |||
1864 | ||||
1865 | update_flags->raw = 0; // Reset all flags | |||
1866 | ||||
1867 | type = get_plane_info_update_type(u); | |||
1868 | elevate_update_type(&overall_type, type); | |||
1869 | ||||
1870 | type = get_scaling_info_update_type(u); | |||
1871 | elevate_update_type(&overall_type, type); | |||
1872 | ||||
1873 | if (u->flip_addr) | |||
1874 | update_flags->bits.addr_update = 1; | |||
1875 | ||||
1876 | if (u->in_transfer_func) | |||
1877 | update_flags->bits.in_transfer_func_change = 1; | |||
1878 | ||||
1879 | if (u->input_csc_color_matrix) | |||
1880 | update_flags->bits.input_csc_change = 1; | |||
1881 | ||||
1882 | if (u->coeff_reduction_factor) | |||
1883 | update_flags->bits.coeff_reduction_change = 1; | |||
1884 | ||||
1885 | if (u->gamut_remap_matrix) | |||
1886 | update_flags->bits.gamut_remap_change = 1; | |||
1887 | ||||
1888 | if (u->gamma) { | |||
1889 | enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; | |||
1890 | ||||
1891 | if (u->plane_info) | |||
1892 | format = u->plane_info->format; | |||
1893 | else if (u->surface) | |||
1894 | format = u->surface->format; | |||
1895 | ||||
1896 | if (dce_use_lut(format)) | |||
1897 | update_flags->bits.gamma_change = 1; | |||
1898 | } | |||
1899 | ||||
1900 | if (u->hdr_mult.value) | |||
1901 | if (u->hdr_mult.value != u->surface->hdr_mult.value) { | |||
1902 | update_flags->bits.hdr_mult = 1; | |||
1903 | elevate_update_type(&overall_type, UPDATE_TYPE_MED); | |||
1904 | } | |||
1905 | ||||
1906 | if (update_flags->bits.in_transfer_func_change) { | |||
1907 | type = UPDATE_TYPE_MED; | |||
1908 | elevate_update_type(&overall_type, type); | |||
1909 | } | |||
1910 | ||||
1911 | if (update_flags->bits.input_csc_change | |||
1912 | || update_flags->bits.coeff_reduction_change | |||
1913 | || update_flags->bits.gamma_change | |||
1914 | || update_flags->bits.gamut_remap_change) { | |||
1915 | type = UPDATE_TYPE_FULL; | |||
1916 | elevate_update_type(&overall_type, type); | |||
1917 | } | |||
1918 | ||||
1919 | return overall_type; | |||
1920 | } | |||
1921 | ||||
/*
 * Compute the overall update type for a stream update plus its surface
 * updates.
 *
 * Starts at FAST and elevates: a surface-count mismatch (or missing
 * status) forces FULL; any stream-level flag (scaling, output transfer
 * function, ABM, DPMS, gamut remap, writeback, DSC) forces FULL; finally
 * each surface update is classified via det_surface_update() and the
 * maximum wins.
 *
 * NOTE(review): out_csc is recorded *after* the su_flags->raw check, so on
 * its own it does not force a FULL update — presumably intentional (output
 * CSC can be applied as a fast update); confirm against the hwseq
 * programming path.
 */
static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	/* While idle optimizations are active, any update must be full. */
	if (dc->idle_optimizations_allowed)
		overall_type = UPDATE_TYPE_FULL;

#endif
	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	/* some stream updates require passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
			stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		/* Any stream-level flag set so far escalates to FULL. */
		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;
	}

	/* Elevate by the worst individual surface update. */
	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
1983 | ||||
/**
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * Clears all stream/surface update flags, classifies the update via
 * check_update_surfaces_for_stream(), then post-processes:
 *  - FULL: saturate every flag to 0xFFFFFFFF, but preserve the stream's
 *    dsc_changed bit as computed (it must reflect whether DSC actually
 *    changed, not the blanket FULL).
 *  - FAST: decide whether bandwidth optimization is still required by
 *    comparing current clocks against the context's, using the clk_mgr's
 *    comparator when available, else a raw memcmp of the leading portion
 *    of struct dc_clocks (up to prev_p_state_change_support).
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	/* Start from a clean slate: no stale flags from a previous check. */
	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		if (stream_update) {
			/* dsc_changed must survive the blanket flag set below. */
			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;

			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
		}
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_FAST) {
		// If there's an available clock comparator, we use that.
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		// Else we fallback to mem compare.
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}

		dc->optimized_required |= dc->wm_optimized_required;
	}

	return type;
}
2030 | ||||
2031 | static struct dc_stream_status *stream_get_status( | |||
2032 | struct dc_state *ctx, | |||
2033 | struct dc_stream_state *stream) | |||
2034 | { | |||
2035 | uint8_t i; | |||
2036 | ||||
2037 | for (i = 0; i < ctx->stream_count; i++) { | |||
2038 | if (stream == ctx->streams[i]) { | |||
2039 | return &ctx->stream_status[i]; | |||
2040 | } | |||
2041 | } | |||
2042 | ||||
2043 | return NULL((void *)0); | |||
2044 | } | |||
2045 | ||||
/* Minimum update severity at which surface updates are traced. */
static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2047 | ||||
/*
 * Apply a dc_surface_update onto its target plane state.
 *
 * Each sub-struct of the update is optional (NULL = unchanged). Flip
 * address, scaling and plane info are copied by value; gamma, transfer
 * functions, shaper/3D LUT and blend TF are deep-copied into the plane's
 * existing objects — but only when the update points at a *different*
 * object than the one the plane already holds (the pointer-inequality
 * guards below).
 *
 * NOTE(review): the deep copies assume the destination objects
 * (surface->gamma_correction, ->in_transfer_func, ->in_shaper_func,
 * ->lut3d_func, ->blend_tf) are non-NULL whenever the corresponding
 * update field is set — not checked here; confirm against callers.
 */
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		/* Maintain the ring buffer of inter-flip time deltas. */
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
			surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;

		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	/* Deep-copy gamma only when the update supplies a different object. */
	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
				srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (srf_update->func_shaper &&
			(surface->in_shaper_func !=
			srf_update->func_shaper))
		memcpy(surface->in_shaper_func, srf_update->func_shaper,
		sizeof(*surface->in_shaper_func));

	if (srf_update->lut3d_func &&
			(surface->lut3d_func !=
			srf_update->lut3d_func))
		memcpy(surface->lut3d_func, srf_update->lut3d_func,
		sizeof(*surface->lut3d_func));

	/* Non-zero hdr_mult acts as the "field present" sentinel. */
	if (srf_update->hdr_mult.value)
		surface->hdr_mult =
				srf_update->hdr_mult;

	if (srf_update->blend_tf &&
			(surface->blend_tf !=
			srf_update->blend_tf))
		memcpy(surface->blend_tf, srf_update->blend_tf,
		sizeof(*surface->blend_tf));

	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;

	if (srf_update->gamut_remap_matrix)
		surface->gamut_remap_matrix =
			*srf_update->gamut_remap_matrix;
}
2170 | ||||
/*
 * Apply a dc_stream_update onto its target stream state.
 *
 * Every field of the update is optional (NULL / zero = unchanged) and is
 * copied onto @stream by value; the output transfer function is
 * deep-copied only when the update supplies a different object.
 *
 * DSC is special: the new config is validated for bandwidth against a
 * temporary copy of the current state before being accepted. On
 * validation failure (or if the temp context cannot be allocated) the old
 * DSC config is restored and update->dsc_config is cleared so later
 * stages skip DSC programming.
 */
static void copy_stream_update_to_stream(struct dc *dc,
					 struct dc_state *context,
					 struct dc_stream_state *stream,
					 struct dc_stream_update *update)
{
	struct dc_context *dc_ctx = dc->ctx;

	if (update == NULL || stream == NULL)
		return;

	/* Zero width/height means "no change" for src/dst rects. */
	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	/* Deep-copy the output TF only when it is a different object. */
	if (update->out_transfer_func &&
	    stream->out_transfer_func != update->out_transfer_func) {
		stream->out_transfer_func->sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func->tf = update->out_transfer_func->tf;
		stream->out_transfer_func->type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func->tf_pts,
		       &update->out_transfer_func->tf_pts,
		       sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt0)
		stream->periodic_interrupt0 = *update->periodic_interrupt0;

	if (update->periodic_interrupt1)
		stream->periodic_interrupt1 = *update->periodic_interrupt1;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;
	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		/* DSC is enabled only when slice counts are non-zero. */
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		/* Use temporarry context for validating new DSC config */
		struct dc_state *dsc_validate_context = dc_create_state(dc);

		if (dsc_validate_context) {
			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);

			/* Apply tentatively, validate, and roll back on failure. */
			stream->timing.dsc_cfg = *update->dsc_config;
			stream->timing.flags.DSC = enable_dsc;
			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
				stream->timing.dsc_cfg = old_dsc_cfg;
				stream->timing.flags.DSC = old_dsc_enabled;
				update->dsc_config = NULL;
			}

			dc_release_state(dsc_validate_context);
		} else {
			DC_ERROR("Failed to allocate new validate context for DSC change\n");
			update->dsc_config = NULL;
		}
	}
}
2275 | ||||
/*
 * Apply the stream-level portion of an update to every top-level pipe
 * driving @stream in @context.
 *
 * Infoframe, gamut/CSC and dither changes are always applied; the
 * heavier reprogramming (DSC config, DPMS on/off, ABM level) is skipped
 * for UPDATE_TYPE_FAST and only runs on a full front-end update.
 */
static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;
	bool should_program_abm;

	// Stream updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		/* Only the OTG-owning pipe (no top pipe, no preceding ODM pipe)
		 * programs stream-level state.
		 */
		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt0 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);

			if (stream_update->periodic_interrupt1 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);

			/* Static HDR metadata goes through the infoframe path only
			 * when the stream does not use dynamic metadata.
			 */
			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}

			/* Dynamic metadata path requires a programmed dmdata address. */
			if (stream_update->hdr_static_metadata &&
					stream->use_dynamic_meta &&
					dc->hwss.set_dmdata_attributes &&
					pipe_ctx->stream->dmdata_address.quad_part != 0)
				dc->hwss.set_dmdata_attributes(pipe_ctx);

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			if (stream_update->dither_option) {
				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
									&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
				/* Mirror the OPP format programming on every chained ODM pipe. */
				while (odm_pipe) {
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
					odm_pipe = odm_pipe->next_odm_pipe;
				}
			}

			/* Full fe update*/
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			if (stream_update->dsc_config)
				dp_update_dsc_config(pipe_ctx);

			if (stream_update->dpms_off) {
				if (*stream_update->dpms_off) {
					core_link_disable_stream(pipe_ctx);
					/* for dpms, keep acquired resources*/
					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

					dc->optimized_required = true;

				} else {
					if (dc->optimize_seamless_boot_streams == 0)
						dc->hwss.prepare_bandwidth(dc, dc->current_state);

					core_link_enable_stream(dc->current_state, pipe_ctx);
				}
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				should_program_abm = true;

				// if otg funcs defined check if blanked before programming
				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						should_program_abm = false;

				if (should_program_abm) {
					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
						dc->hwss.set_abm_immediate_disable(pipe_ctx);
					} else {
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
					}
				}
			}
		}
	}
}
2378 | ||||
2379 | static void commit_planes_for_stream(struct dc *dc, | |||
2380 | struct dc_surface_update *srf_updates, | |||
2381 | int surface_count, | |||
2382 | struct dc_stream_state *stream, | |||
2383 | struct dc_stream_update *stream_update, | |||
2384 | enum surface_update_type update_type, | |||
2385 | struct dc_state *context) | |||
2386 | { | |||
2387 | bool_Bool mpcc_disconnected = false0; | |||
2388 | int i, j; | |||
2389 | struct pipe_ctx *top_pipe_to_program = NULL((void *)0); | |||
2390 | ||||
2391 | if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) { | |||
| ||||
2392 | /* Optimize seamless boot flag keeps clocks and watermarks high until | |||
2393 | * first flip. After first flip, optimization is required to lower | |||
2394 | * bandwidth. Important to note that it is expected UEFI will | |||
2395 | * only light up a single display on POST, therefore we only expect | |||
2396 | * one stream with seamless boot flag set. | |||
2397 | */ | |||
2398 | if (stream->apply_seamless_boot_optimization) { | |||
2399 | stream->apply_seamless_boot_optimization = false0; | |||
2400 | dc->optimize_seamless_boot_streams--; | |||
2401 | ||||
2402 | if (dc->optimize_seamless_boot_streams == 0) | |||
2403 | dc->optimized_required = true1; | |||
2404 | } | |||
2405 | } | |||
2406 | ||||
2407 | if (update_type == UPDATE_TYPE_FULL) { | |||
2408 | #if defined(CONFIG_DRM_AMD_DC_DCN3_01) | |||
2409 | dc_allow_idle_optimizations(dc, false0); | |||
2410 | ||||
2411 | #endif | |||
2412 | if (dc->optimize_seamless_boot_streams == 0) | |||
2413 | dc->hwss.prepare_bandwidth(dc, context); | |||
2414 | ||||
2415 | context_clock_trace(dc, context); | |||
2416 | } | |||
2417 | ||||
2418 | if (update_type != UPDATE_TYPE_FAST && dc->hwss.interdependent_update_lock && | |||
2419 | dc->hwss.disconnect_pipes && dc->hwss.wait_for_pending_cleared){ | |||
2420 | dc->hwss.interdependent_update_lock(dc, context, true1); | |||
2421 | mpcc_disconnected = dc->hwss.disconnect_pipes(dc, context); | |||
2422 | dc->hwss.interdependent_update_lock(dc, context, false0); | |||
2423 | if (mpcc_disconnected) | |||
2424 | dc->hwss.wait_for_pending_cleared(dc, context); | |||
2425 | } | |||
2426 | ||||
2427 | for (j = 0; j < dc->res_pool->pipe_count; j++) { | |||
2428 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; | |||
2429 | ||||
2430 | if (!pipe_ctx->top_pipe && | |||
2431 | !pipe_ctx->prev_odm_pipe && | |||
2432 | pipe_ctx->stream && | |||
2433 | pipe_ctx->stream == stream) { | |||
2434 | top_pipe_to_program = pipe_ctx; | |||
2435 | } | |||
2436 | } | |||
2437 | ||||
2438 | if ((update_type
| |||
2439 | if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { | |||
2440 | if (should_use_dmub_lock(stream->link)) { | |||
2441 | union dmub_hw_lock_flags hw_locks = { 0 }; | |||
2442 | struct dmub_hw_lock_inst_flags inst_flags = { 0 }; | |||
2443 | ||||
2444 | hw_locks.bits.lock_dig = 1; | |||
2445 | inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; | |||
2446 | ||||
2447 | dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, | |||
2448 | true1, | |||
2449 | &hw_locks, | |||
2450 | &inst_flags); | |||
2451 | } else | |||
2452 | top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( | |||
2453 | top_pipe_to_program->stream_res.tg); | |||
2454 | } | |||
2455 | ||||
2456 | if ((update_type
| |||
2457 | dc->hwss.interdependent_update_lock(dc, context, true1); | |||
2458 | else | |||
2459 | /* Lock the top pipe while updating plane addrs, since freesync requires | |||
2460 | * plane addr update event triggers to be synchronized. | |||
2461 | * top_pipe_to_program is expected to never be NULL | |||
2462 | */ | |||
2463 | dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true1); | |||
2464 | ||||
2465 | ||||
2466 | // Stream updates | |||
2467 | if (stream_update) | |||
2468 | commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); | |||
2469 | ||||
2470 | if (surface_count == 0) { | |||
2471 | /* | |||
2472 | * In case of turning off screen, no need to program front end a second time. | |||
2473 | * just return after program blank. | |||
2474 | */ | |||
2475 | if (dc->hwss.apply_ctx_for_surface) | |||
2476 | dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); | |||
2477 | if (dc->hwss.program_front_end_for_ctx) | |||
2478 | dc->hwss.program_front_end_for_ctx(dc, context); | |||
2479 | ||||
2480 | if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) | |||
2481 | dc->hwss.interdependent_update_lock(dc, context, false0); | |||
2482 | else | |||
2483 | dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false0); | |||
2484 | ||||
2485 | dc->hwss.post_unlock_program_front_end(dc, context); | |||
2486 | return; | |||
2487 | } | |||
2488 | ||||
2489 | if (!IS_DIAG_DC(dc->ctx->dce_environment)((dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) || ( dc->ctx->dce_environment == DCE_ENV_DIAG))) { | |||
2490 | for (i = 0; i < surface_count; i++) { | |||
2491 | struct dc_plane_state *plane_state = srf_updates[i].surface; | |||
2492 | /*set logical flag for lock/unlock use*/ | |||
2493 | for (j = 0; j < dc->res_pool->pipe_count; j++) { | |||
2494 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; | |||
2495 | if (!pipe_ctx->plane_state) | |||
2496 | continue; | |||
2497 | if (pipe_ctx->plane_state != plane_state) | |||
2498 | continue; | |||
2499 | plane_state->triplebuffer_flips = false0; | |||
2500 | if (update_type == UPDATE_TYPE_FAST && | |||
2501 | dc->hwss.program_triplebuffer != NULL((void *)0) && | |||
2502 | !plane_state->flip_immediate && dc->debug.enable_tri_buf) { | |||
2503 | /*triple buffer for VUpdate only*/ | |||
2504 | plane_state->triplebuffer_flips = true1; | |||
2505 | } | |||
2506 | } | |||
2507 | if (update_type == UPDATE_TYPE_FULL) { | |||
2508 | /* force vsync flip when reconfiguring pipes to prevent underflow */ | |||
2509 | plane_state->flip_immediate = false0; | |||
2510 | } | |||
2511 | } | |||
2512 | } | |||
2513 | ||||
2514 | // Update Type FULL, Surface updates | |||
2515 | for (j = 0; j < dc->res_pool->pipe_count; j++) { | |||
2516 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; | |||
2517 | ||||
2518 | if (!pipe_ctx->top_pipe && | |||
2519 | !pipe_ctx->prev_odm_pipe && | |||
2520 | pipe_ctx->stream && | |||
2521 | pipe_ctx->stream == stream) { | |||
2522 | struct dc_stream_status *stream_status = NULL((void *)0); | |||
2523 | ||||
2524 | if (!pipe_ctx->plane_state) | |||
2525 | continue; | |||
2526 | ||||
2527 | /* Full fe update*/ | |||
2528 | if (update_type == UPDATE_TYPE_FAST) | |||
2529 | continue; | |||
2530 | ||||
2531 | ASSERT(!pipe_ctx->plane_state->triplebuffer_flips)do { if (({ static int __warned; int __ret = !!(!(!pipe_ctx-> plane_state->triplebuffer_flips)); if (__ret && !__warned ) { printf("WARNING %s failed at %s:%d\n", "!(!pipe_ctx->plane_state->triplebuffer_flips)" , "/usr/src/sys/dev/pci/drm/amd/display/dc/core/amdgpu_dc.c", 2531); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); | |||
2532 | ||||
2533 | if (dc->hwss.program_triplebuffer != NULL((void *)0) && dc->debug.enable_tri_buf) { | |||
2534 | /*turn off triple buffer for full update*/ | |||
2535 | dc->hwss.program_triplebuffer( | |||
2536 | dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); | |||
2537 | } | |||
2538 | stream_status = | |||
2539 | stream_get_status(context, pipe_ctx->stream); | |||
2540 | ||||
2541 | if (dc->hwss.apply_ctx_for_surface) | |||
2542 | dc->hwss.apply_ctx_for_surface( | |||
2543 | dc, pipe_ctx->stream, stream_status->plane_count, context); | |||
2544 | } | |||
2545 | } | |||
2546 | if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { | |||
2547 | dc->hwss.program_front_end_for_ctx(dc, context); | |||
2548 | #ifdef CONFIG_DRM_AMD_DC_DCN1 | |||
2549 | if (dc->debug.validate_dml_output) { | |||
2550 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | |||
2551 | struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i]; | |||
2552 | if (cur_pipe.stream == NULL((void *)0)) | |||
2553 | continue; | |||
2554 | ||||
2555 | cur_pipe.plane_res.hubp->funcs->validate_dml_output( | |||
2556 | cur_pipe.plane_res.hubp, dc->ctx, | |||
2557 | &context->res_ctx.pipe_ctx[i].rq_regs, | |||
2558 | &context->res_ctx.pipe_ctx[i].dlg_regs, | |||
2559 | &context->res_ctx.pipe_ctx[i].ttu_regs); | |||
2560 | } | |||
2561 | } | |||
2562 | #endif | |||
2563 | } | |||
2564 | ||||
2565 | // Update Type FAST, Surface updates | |||
2566 | if (update_type
| |||
2567 | if (dc->hwss.set_flip_control_gsl) | |||
2568 | for (i = 0; i < surface_count; i++) { | |||
2569 | struct dc_plane_state *plane_state = srf_updates[i].surface; | |||
2570 | ||||
2571 | for (j = 0; j < dc->res_pool->pipe_count; j++) { | |||
2572 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; | |||
2573 | ||||
2574 | if (pipe_ctx->stream != stream
| |||
2575 | continue; | |||
2576 | ||||
2577 | if (pipe_ctx->plane_state != plane_state) | |||
2578 | continue; | |||
2579 | ||||
2580 | // GSL has to be used for flip immediate | |||
2581 | dc->hwss.set_flip_control_gsl(pipe_ctx, | |||
2582 | plane_state->flip_immediate); | |||
| ||||
2583 | } | |||
2584 | } | |||
2585 | /* Perform requested Updates */ | |||
2586 | for (i = 0; i < surface_count; i++) { | |||
2587 | struct dc_plane_state *plane_state = srf_updates[i].surface; | |||
2588 | ||||
2589 | for (j = 0; j < dc->res_pool->pipe_count; j++) { | |||
2590 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; | |||
2591 | ||||
2592 | if (pipe_ctx->stream != stream) | |||
2593 | continue; | |||
2594 | ||||
2595 | if (pipe_ctx->plane_state != plane_state) | |||
2596 | continue; | |||
2597 | /*program triple buffer after lock based on flip type*/ | |||
2598 | if (dc->hwss.program_triplebuffer != NULL((void *)0) && dc->debug.enable_tri_buf) { | |||
2599 | /*only enable triplebuffer for fast_update*/ | |||
2600 | dc->hwss.program_triplebuffer( | |||
2601 | dc, pipe_ctx, plane_state->triplebuffer_flips); | |||
2602 | } | |||
2603 | if (srf_updates[i].flip_addr) | |||
2604 | dc->hwss.update_plane_addr(dc, pipe_ctx); | |||
2605 | } | |||
2606 | } | |||
2607 | } | |||
2608 | ||||
2609 | if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) | |||
2610 | dc->hwss.interdependent_update_lock(dc, context, false0); | |||
2611 | else | |||
2612 | dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false0); | |||
2613 | ||||
2614 | if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) | |||
2615 | if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { | |||
2616 | top_pipe_to_program->stream_res.tg->funcs->wait_for_state( | |||
2617 | top_pipe_to_program->stream_res.tg, | |||
2618 | CRTC_STATE_VACTIVE); | |||
2619 | top_pipe_to_program->stream_res.tg->funcs->wait_for_state( | |||
2620 | top_pipe_to_program->stream_res.tg, | |||
2621 | CRTC_STATE_VBLANK); | |||
2622 | top_pipe_to_program->stream_res.tg->funcs->wait_for_state( | |||
2623 | top_pipe_to_program->stream_res.tg, | |||
2624 | CRTC_STATE_VACTIVE); | |||
2625 | ||||
2626 | if (stream && should_use_dmub_lock(stream->link)) { | |||
2627 | union dmub_hw_lock_flags hw_locks = { 0 }; | |||
2628 | struct dmub_hw_lock_inst_flags inst_flags = { 0 }; | |||
2629 | ||||
2630 | hw_locks.bits.lock_dig = 1; | |||
2631 | inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; | |||
2632 | ||||
2633 | dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, | |||
2634 | false0, | |||
2635 | &hw_locks, | |||
2636 | &inst_flags); | |||
2637 | } else | |||
2638 | top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( | |||
2639 | top_pipe_to_program->stream_res.tg); | |||
2640 | } | |||
2641 | ||||
2642 | if (update_type != UPDATE_TYPE_FAST) | |||
2643 | dc->hwss.post_unlock_program_front_end(dc, context); | |||
2644 | ||||
2645 | // Fire manual trigger only when bottom plane is flipped | |||
2646 | for (j = 0; j < dc->res_pool->pipe_count; j++) { | |||
2647 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; | |||
2648 | ||||
2649 | if (pipe_ctx->bottom_pipe || | |||
2650 | !pipe_ctx->stream || | |||
2651 | pipe_ctx->stream != stream || | |||
2652 | !pipe_ctx->plane_state->update_flags.bits.addr_update) | |||
2653 | continue; | |||
2654 | ||||
2655 | if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) | |||
2656 | pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); | |||
2657 | } | |||
2658 | } | |||
2659 | ||||
/*
 * Validate and commit a set of surface/stream updates against @state.
 *
 * Classifies the update (FAST/MED/FULL); for FULL updates builds a fresh
 * validation context copied from @state, copies the update payloads into
 * the plane/stream objects, re-validates bandwidth, then programs the
 * hardware via commit_planes_for_stream(). On success with a new context,
 * swaps it into dc->current_state and releases the old one.
 *
 * On allocation or validation failure the function logs and returns
 * without committing anything.
 */
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;	/* used implicitly by DC_ERROR */
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);


	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		/* Planes newly assigned to a pipe must be fully reprogrammed. */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}


	/* Fold the update payloads into the plane objects; rebuild scaling
	 * parameters for MED and larger updates.
	 */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	if (update_type >= UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
			DC_ERROR("Mode validation failed for stream update!\n");
			dc_release_state(context);
			return;
		}
	}

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	/*update current_State*/
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		/* The force-full flags were one-shot; clear them now that the
		 * new context has been programmed.
		 */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}
	/*let's use current_state to update watermark etc*/
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);

	return;

}
2762 | ||||
2763 | uint8_t dc_get_current_stream_count(struct dc *dc) | |||
2764 | { | |||
2765 | return dc->current_state->stream_count; | |||
2766 | } | |||
2767 | ||||
2768 | struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) | |||
2769 | { | |||
2770 | if (i < dc->current_state->stream_count) | |||
2771 | return dc->current_state->streams[i]; | |||
2772 | return NULL((void *)0); | |||
2773 | } | |||
2774 | ||||
2775 | struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link) | |||
2776 | { | |||
2777 | uint8_t i; | |||
2778 | struct dc_context *ctx = link->ctx; | |||
2779 | ||||
2780 | for (i = 0; i < ctx->dc->current_state->stream_count; i++) { | |||
2781 | if (ctx->dc->current_state->streams[i]->link == link) | |||
2782 | return ctx->dc->current_state->streams[i]; | |||
2783 | } | |||
2784 | ||||
2785 | return NULL((void *)0); | |||
2786 | } | |||
2787 | ||||
/* Translate a raw hw interrupt (src_id/ext_id) into the dc_irq_source
 * enum via the resource pool's IRQ service.
 */
enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
2795 | ||||
2796 | /** | |||
2797 | * dc_interrupt_set() - Enable/disable an AMD hw interrupt source | |||
2798 | */ | |||
2799 | bool_Bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool_Bool enable) | |||
2800 | { | |||
2801 | ||||
2802 | if (dc == NULL((void *)0)) | |||
2803 | return false0; | |||
2804 | ||||
2805 | return dal_irq_service_set(dc->res_pool->irqs, src, enable); | |||
2806 | } | |||
2807 | ||||
/* Acknowledge (clear) a pending hw interrupt source. */
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}
2812 | ||||
2813 | void dc_power_down_on_boot(struct dc *dc) | |||
2814 | { | |||
2815 | if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && | |||
2816 | dc->hwss.power_down_on_boot) | |||
2817 | dc->hwss.power_down_on_boot(dc); | |||
2818 | } | |||
2819 | ||||
/*
 * Transition dc to the given ACPI power state.
 *
 * D0 (resume): rebuild the current resource state, wait for DMUB PHY
 * init when a DMUB service is present, re-run full hw init, and
 * reprogram the system context when a valid VM config exists.
 *
 * Any other state (suspend): zero out dc->current_state so resume starts
 * clean, while preserving the state's refcount and its display_mode_lib
 * (staged through a temporary heap buffer because memset would destroy
 * them in place).
 */
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		if (dc->ctx->dmub_srv)
			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);

		dc->hwss.init_hw(dc);

		if (dc->hwss.init_sys_ctx != NULL &&
			dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}

		break;
	default:
		/* Suspend path expects all streams to be released already. */
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		dml = kzalloc(sizeof(struct display_mode_lib),
				GFP_KERNEL);

		ASSERT(dml);
		if (!dml)
			return;

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		/* Restore the two preserved members into the zeroed state. */
		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		kfree(dml);

		break;
	}
}
2872 | ||||
2873 | void dc_resume(struct dc *dc) | |||
2874 | { | |||
2875 | uint32_t i; | |||
2876 | ||||
2877 | for (i = 0; i < dc->link_count; i++) | |||
2878 | core_link_resume(dc->links[i]); | |||
2879 | } | |||
2880 | ||||
2881 | bool_Bool dc_is_dmcu_initialized(struct dc *dc) | |||
2882 | { | |||
2883 | struct dmcu *dmcu = dc->res_pool->dmcu; | |||
2884 | ||||
2885 | if (dmcu) | |||
2886 | return dmcu->funcs->is_dmcu_initialized(dmcu); | |||
2887 | return false0; | |||
2888 | } | |||
2889 | ||||
2890 | bool_Bool dc_submit_i2c( | |||
2891 | struct dc *dc, | |||
2892 | uint32_t link_index, | |||
2893 | struct i2c_command *cmd) | |||
2894 | { | |||
2895 | ||||
2896 | struct dc_link *link = dc->links[link_index]; | |||
2897 | struct ddc_service *ddc = link->ddc; | |||
2898 | return dce_i2c_submit_command( | |||
2899 | dc->res_pool, | |||
2900 | ddc->ddc_pin, | |||
2901 | cmd); | |||
2902 | } | |||
2903 | ||||
2904 | bool_Bool dc_submit_i2c_oem( | |||
2905 | struct dc *dc, | |||
2906 | struct i2c_command *cmd) | |||
2907 | { | |||
2908 | struct ddc_service *ddc = dc->res_pool->oem_device; | |||
2909 | return dce_i2c_submit_command( | |||
2910 | dc->res_pool, | |||
2911 | ddc->ddc_pin, | |||
2912 | cmd); | |||
2913 | } | |||
2914 | ||||
2915 | static bool_Bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink) | |||
2916 | { | |||
2917 | if (dc_link->sink_count >= MAX_SINKS_PER_LINK4) { | |||
2918 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 2918); do {} while (0); } while (0); | |||
2919 | return false0; | |||
2920 | } | |||
2921 | ||||
2922 | dc_sink_retain(sink); | |||
2923 | ||||
2924 | dc_link->remote_sinks[dc_link->sink_count] = sink; | |||
2925 | dc_link->sink_count++; | |||
2926 | ||||
2927 | return true1; | |||
2928 | } | |||
2929 | ||||
/**
 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
 *
 * EDID length is in bytes. Validates @len and @init_data, creates the
 * sink, copies the EDID in, attaches the sink to @link (taking a
 * reference), then parses EDID caps. A sink whose EDID fails to parse is
 * kept but treated as an EDID-less device (length forced to 0).
 *
 * Return: the new sink, or NULL on bad arguments / allocation failure /
 * full sink array.
 */
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat device as no EDID device if EDID
	 * parsing fails
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status%d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	/* Drops the creation reference; the sink was never attached. */
	dc_sink_release(dc_sink);
	return NULL;
}
2992 | ||||
2993 | /** | |||
2994 | * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link | |||
2995 | * | |||
2996 | * Note that this just removes the struct dc_sink - it doesn't | |||
2997 | * program hardware or alter other members of dc_link | |||
2998 | */ | |||
2999 | void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) | |||
3000 | { | |||
3001 | int i; | |||
3002 | ||||
3003 | if (!link->sink_count) { | |||
3004 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 3004); do {} while (0); } while (0); | |||
3005 | return; | |||
3006 | } | |||
3007 | ||||
3008 | for (i = 0; i < link->sink_count; i++) { | |||
3009 | if (link->remote_sinks[i] == sink) { | |||
3010 | dc_sink_release(sink); | |||
3011 | link->remote_sinks[i] = NULL((void *)0); | |||
3012 | ||||
3013 | /* shrink array to remove empty place */ | |||
3014 | while (i < link->sink_count - 1) { | |||
3015 | link->remote_sinks[i] = link->remote_sinks[i+1]; | |||
3016 | i++; | |||
3017 | } | |||
3018 | link->remote_sinks[i] = NULL((void *)0); | |||
3019 | link->sink_count--; | |||
3020 | return; | |||
3021 | } | |||
3022 | } | |||
3023 | } | |||
3024 | ||||
3025 | void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info) | |||
3026 | { | |||
3027 | info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz; | |||
3028 | info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz; | |||
3029 | info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz; | |||
3030 | info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz; | |||
3031 | info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz; | |||
3032 | info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz; | |||
3033 | info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz; | |||
3034 | info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz; | |||
3035 | info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz; | |||
3036 | } | |||
3037 | enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) | |||
3038 | { | |||
3039 | if (dc->hwss.set_clock) | |||
3040 | return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); | |||
3041 | return DC_ERROR_UNEXPECTED; | |||
3042 | } | |||
3043 | void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) | |||
3044 | { | |||
3045 | if (dc->hwss.get_clock) | |||
3046 | dc->hwss.get_clock(dc, clock_type, clock_cfg); | |||
3047 | } | |||
3048 | ||||
3049 | /* enable/disable eDP PSR without specify stream for eDP */ | |||
3050 | bool_Bool dc_set_psr_allow_active(struct dc *dc, bool_Bool enable) | |||
3051 | { | |||
3052 | int i; | |||
3053 | ||||
3054 | for (i = 0; i < dc->current_state->stream_count ; i++) { | |||
3055 | struct dc_link *link; | |||
3056 | struct dc_stream_state *stream = dc->current_state->streams[i]; | |||
3057 | ||||
3058 | link = stream->link; | |||
3059 | if (!link) | |||
3060 | continue; | |||
3061 | ||||
3062 | if (link->psr_settings.psr_feature_enabled) { | |||
3063 | if (enable && !link->psr_settings.psr_allow_active) | |||
3064 | return dc_link_set_psr_allow_active(link, true1, false0); | |||
3065 | else if (!enable && link->psr_settings.psr_allow_active) | |||
3066 | return dc_link_set_psr_allow_active(link, false0, true1); | |||
3067 | } | |||
3068 | } | |||
3069 | ||||
3070 | return true1; | |||
3071 | } | |||
3072 | ||||
3073 | #if defined(CONFIG_DRM_AMD_DC_DCN3_01) | |||
3074 | ||||
3075 | void dc_allow_idle_optimizations(struct dc *dc, bool_Bool allow) | |||
3076 | { | |||
3077 | if (dc->debug.disable_idle_power_optimizations) | |||
3078 | return; | |||
3079 | ||||
3080 | if (allow == dc->idle_optimizations_allowed) | |||
3081 | return; | |||
3082 | ||||
3083 | if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow)) | |||
3084 | dc->idle_optimizations_allowed = allow; | |||
3085 | } | |||
3086 | ||||
3087 | /* | |||
3088 | * blank all streams, and set min and max memory clock to | |||
3089 | * lowest and highest DPM level, respectively | |||
3090 | */ | |||
3091 | void dc_unlock_memory_clock_frequency(struct dc *dc) | |||
3092 | { | |||
3093 | unsigned int i; | |||
3094 | ||||
3095 | for (i = 0; i < MAX_PIPES6; i++) | |||
3096 | if (dc->current_state->res_ctx.pipe_ctx[i].plane_state) | |||
3097 | core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]); | |||
3098 | ||||
3099 | dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false0); | |||
3100 | dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); | |||
3101 | } | |||
3102 | ||||
3103 | /* | |||
3104 | * set min memory clock to the min required for current mode, | |||
3105 | * max to maxDPM, and unblank streams | |||
3106 | */ | |||
3107 | void dc_lock_memory_clock_frequency(struct dc *dc) | |||
3108 | { | |||
3109 | unsigned int i; | |||
3110 | ||||
3111 | dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr); | |||
3112 | dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true1); | |||
3113 | dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); | |||
3114 | ||||
3115 | for (i = 0; i < MAX_PIPES6; i++) | |||
3116 | if (dc->current_state->res_ctx.pipe_ctx[i].plane_state) | |||
3117 | core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); | |||
3118 | } | |||
3119 | ||||
3120 | bool_Bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc, | |||
3121 | struct dc_plane_state *plane) | |||
3122 | { | |||
3123 | return false0; | |||
3124 | } | |||
3125 | #endif |