File: | dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c |
Warning: | line 302, column 3 Value stored to 'dwb' is never read |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* |
2 | * Copyright 2020 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: AMD |
23 | * |
24 | */ |
25 | |
26 | |
27 | #include "dm_services.h" |
28 | #include "dm_helpers.h" |
29 | #include "core_types.h" |
30 | #include "resource.h" |
31 | #include "dcn30_hwseq.h" |
32 | #include "dccg.h" |
33 | #include "dce/dce_hwseq.h" |
34 | #include "dcn30_mpc.h" |
35 | #include "dcn30_dpp.h" |
36 | #include "dcn10/dcn10_cm_common.h" |
37 | #include "dcn30_cm_common.h" |
38 | #include "reg_helper.h" |
39 | #include "abm.h" |
40 | #include "clk_mgr.h" |
41 | #include "hubp.h" |
42 | #include "dchubbub.h" |
43 | #include "timing_generator.h" |
44 | #include "opp.h" |
45 | #include "ipp.h" |
46 | #include "mpc.h" |
47 | #include "mcif_wb.h" |
48 | #include "dc_dmub_srv.h" |
49 | #include "link_hwss.h" |
50 | #include "dpcd_defs.h" |
51 | #include "../dcn20/dcn20_hwseq.h" |
52 | #include "dcn30_resource.h" |
53 | #include "inc/dc_link_dp.h" |
54 | #include "inc/link_dpcd.h" |
55 | |
56 | |
57 | |
58 | |
#define DC_LOGGER_INIT(logger)

/* Shorthands used by the register-access helper macros (REG_WRITE,
 * REG_UPDATE, ...): they all expect CTX / REG(reg) / FN(reg, field)
 * to resolve against the local 'hws' (struct dce_hwseq *) variable.
 */
#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg
#define DC_LOGGER \
	dc->ctx->logger

#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name
72 | |
73 | bool_Bool dcn30_set_blend_lut( |
74 | struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) |
75 | { |
76 | struct dpp *dpp_base = pipe_ctx->plane_res.dpp; |
77 | bool_Bool result = true1; |
78 | struct pwl_params *blend_lut = NULL((void *)0); |
79 | |
80 | if (plane_state->blend_tf) { |
81 | if (plane_state->blend_tf->type == TF_TYPE_HWPWL) |
82 | blend_lut = &plane_state->blend_tf->pwl; |
83 | else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { |
84 | cm3_helper_translate_curve_to_hw_format( |
85 | plane_state->blend_tf, &dpp_base->regamma_params, false0); |
86 | blend_lut = &dpp_base->regamma_params; |
87 | } |
88 | } |
89 | result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut); |
90 | |
91 | return result; |
92 | } |
93 | |
94 | static bool_Bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx, |
95 | const struct dc_stream_state *stream) |
96 | { |
97 | struct dpp *dpp_base = pipe_ctx->plane_res.dpp; |
98 | int mpcc_id = pipe_ctx->plane_res.hubp->inst; |
99 | struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; |
100 | bool_Bool result = false0; |
101 | int acquired_rmu = 0; |
102 | int mpcc_id_projected = 0; |
103 | |
104 | const struct pwl_params *shaper_lut = NULL((void *)0); |
105 | //get the shaper lut params |
106 | if (stream->func_shaper) { |
107 | if (stream->func_shaper->type == TF_TYPE_HWPWL) { |
108 | shaper_lut = &stream->func_shaper->pwl; |
109 | } else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { |
110 | cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper, |
111 | &dpp_base->shaper_params, true1); |
112 | shaper_lut = &dpp_base->shaper_params; |
113 | } |
114 | } |
115 | |
116 | if (stream->lut3d_func && |
117 | stream->lut3d_func->state.bits.initialized == 1 && |
118 | stream->lut3d_func->state.bits.rmu_idx_valid == 1) { |
119 | if (stream->lut3d_func->state.bits.rmu_mux_num == 0) |
120 | mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux; |
121 | else if (stream->lut3d_func->state.bits.rmu_mux_num == 1) |
122 | mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu1_mux; |
123 | else if (stream->lut3d_func->state.bits.rmu_mux_num == 2) |
124 | mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux; |
125 | if (mpcc_id_projected != mpcc_id) |
126 | BREAK_TO_DEBUGGER()do { ___drm_dbg(((void *)0), DRM_UT_DRIVER, "%s():%d\n", __func__ , 126); do {} while (0); } while (0); |
127 | /* find the reason why logical layer assigned a different |
128 | * mpcc_id into acquire_post_bldn_3dlut |
129 | */ |
130 | acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id, |
131 | stream->lut3d_func->state.bits.rmu_mux_num); |
132 | if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num) |
133 | BREAK_TO_DEBUGGER()do { ___drm_dbg(((void *)0), DRM_UT_DRIVER, "%s():%d\n", __func__ , 133); do {} while (0); } while (0); |
134 | |
135 | result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, |
136 | stream->lut3d_func->state.bits.rmu_mux_num); |
137 | result = mpc->funcs->program_shaper(mpc, shaper_lut, |
138 | stream->lut3d_func->state.bits.rmu_mux_num); |
139 | } else { |
140 | // loop through the available mux and release the requested mpcc_id |
141 | mpc->funcs->release_rmu(mpc, mpcc_id); |
142 | } |
143 | |
144 | return result; |
145 | } |
146 | |
147 | bool_Bool dcn30_set_input_transfer_func(struct dc *dc, |
148 | struct pipe_ctx *pipe_ctx, |
149 | const struct dc_plane_state *plane_state) |
150 | { |
151 | struct dce_hwseq *hws = dc->hwseq; |
152 | struct dpp *dpp_base = pipe_ctx->plane_res.dpp; |
153 | enum dc_transfer_func_predefined tf; |
154 | bool_Bool result = true1; |
155 | struct pwl_params *params = NULL((void *)0); |
156 | |
157 | if (dpp_base == NULL((void *)0) || plane_state == NULL((void *)0)) |
158 | return false0; |
159 | |
160 | tf = TRANSFER_FUNCTION_UNITY; |
161 | |
162 | if (plane_state->in_transfer_func && |
163 | plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED) |
164 | tf = plane_state->in_transfer_func->tf; |
165 | |
166 | dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf); |
167 | |
168 | if (plane_state->in_transfer_func) { |
169 | if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL) |
170 | params = &plane_state->in_transfer_func->pwl; |
171 | else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && |
172 | cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func, |
173 | &dpp_base->degamma_params, false0)) |
174 | params = &dpp_base->degamma_params; |
175 | } |
176 | |
177 | result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); |
178 | |
179 | if (pipe_ctx->stream_res.opp && pipe_ctx->stream_res.opp->ctx) { |
180 | if (dpp_base->funcs->dpp_program_blnd_lut) |
181 | hws->funcs.set_blend_lut(pipe_ctx, plane_state); |
182 | if (dpp_base->funcs->dpp_program_shaper_lut && |
183 | dpp_base->funcs->dpp_program_3dlut) |
184 | hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state); |
185 | } |
186 | |
187 | return result; |
188 | } |
189 | |
190 | bool_Bool dcn30_set_output_transfer_func(struct dc *dc, |
191 | struct pipe_ctx *pipe_ctx, |
192 | const struct dc_stream_state *stream) |
193 | { |
194 | int mpcc_id = pipe_ctx->plane_res.hubp->inst; |
195 | struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; |
196 | struct pwl_params *params = NULL((void *)0); |
197 | bool_Bool ret = false0; |
198 | |
199 | /* program OGAM or 3DLUT only for the top pipe*/ |
200 | if (pipe_ctx->top_pipe == NULL((void *)0)) { |
201 | /*program rmu shaper and 3dlut in MPC*/ |
202 | ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream); |
203 | if (ret == false0 && mpc->funcs->set_output_gamma && stream->out_transfer_func) { |
204 | if (stream->out_transfer_func->type == TF_TYPE_HWPWL) |
205 | params = &stream->out_transfer_func->pwl; |
206 | else if (pipe_ctx->stream->out_transfer_func->type == |
207 | TF_TYPE_DISTRIBUTED_POINTS && |
208 | cm3_helper_translate_curve_to_hw_format( |
209 | stream->out_transfer_func, |
210 | &mpc->blender_params, false0)) |
211 | params = &mpc->blender_params; |
212 | /* there are no ROM LUTs in OUTGAM */ |
213 | if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) |
214 | BREAK_TO_DEBUGGER()do { ___drm_dbg(((void *)0), DRM_UT_DRIVER, "%s():%d\n", __func__ , 214); do {} while (0); } while (0); |
215 | } |
216 | } |
217 | |
218 | mpc->funcs->set_output_gamma(mpc, mpcc_id, params); |
219 | return ret; |
220 | } |
221 | |
222 | static void dcn30_set_writeback( |
223 | struct dc *dc, |
224 | struct dc_writeback_info *wb_info, |
225 | struct dc_state *context) |
226 | { |
227 | struct mcif_wb *mcif_wb; |
228 | struct mcif_buf_params *mcif_buf_params; |
229 | |
230 | ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES)do { if (({ static int __warned; int __ret = !!(!(wb_info-> dwb_pipe_inst < 1)); if (__ret && !__warned) { printf ("WARNING %s failed at %s:%d\n", "!(wb_info->dwb_pipe_inst < 1)" , "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 230); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
231 | ASSERT(wb_info->wb_enabled)do { if (({ static int __warned; int __ret = !!(!(wb_info-> wb_enabled)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n" , "!(wb_info->wb_enabled)", "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 231); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
232 | ASSERT(wb_info->mpcc_inst >= 0)do { if (({ static int __warned; int __ret = !!(!(wb_info-> mpcc_inst >= 0)); if (__ret && !__warned) { printf ("WARNING %s failed at %s:%d\n", "!(wb_info->mpcc_inst >= 0)" , "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 232); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
233 | ASSERT(wb_info->mpcc_inst < dc->res_pool->mpcc_count)do { if (({ static int __warned; int __ret = !!(!(wb_info-> mpcc_inst < dc->res_pool->mpcc_count)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n", "!(wb_info->mpcc_inst < dc->res_pool->mpcc_count)" , "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 233); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
234 | mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; |
235 | mcif_buf_params = &wb_info->mcif_buf_params; |
236 | |
237 | /* set DWB MPC mux */ |
238 | dc->res_pool->mpc->funcs->set_dwb_mux(dc->res_pool->mpc, |
239 | wb_info->dwb_pipe_inst, wb_info->mpcc_inst); |
240 | /* set MCIF_WB buffer and arbitration configuration */ |
241 | mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, wb_info->dwb_params.dest_height); |
242 | mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); |
243 | } |
244 | |
245 | void dcn30_update_writeback( |
246 | struct dc *dc, |
247 | struct dc_writeback_info *wb_info, |
248 | struct dc_state *context) |
249 | { |
250 | struct dwbc *dwb; |
251 | dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; |
252 | DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\___drm_dbg(((void *)0), DRM_UT_KMS, "%s dwb_pipe_inst = %d, mpcc_inst = %d" , __func__, wb_info->dwb_pipe_inst, wb_info->mpcc_inst) |
253 | __func__, wb_info->dwb_pipe_inst,\___drm_dbg(((void *)0), DRM_UT_KMS, "%s dwb_pipe_inst = %d, mpcc_inst = %d" , __func__, wb_info->dwb_pipe_inst, wb_info->mpcc_inst) |
254 | wb_info->mpcc_inst)___drm_dbg(((void *)0), DRM_UT_KMS, "%s dwb_pipe_inst = %d, mpcc_inst = %d" , __func__, wb_info->dwb_pipe_inst, wb_info->mpcc_inst); |
255 | |
256 | dcn30_set_writeback(dc, wb_info, context); |
257 | |
258 | /* update DWB */ |
259 | dwb->funcs->update(dwb, &wb_info->dwb_params); |
260 | } |
261 | |
262 | bool_Bool dcn30_mmhubbub_warmup( |
263 | struct dc *dc, |
264 | unsigned int num_dwb, |
265 | struct dc_writeback_info *wb_info) |
266 | { |
267 | struct dwbc *dwb; |
268 | struct mcif_wb *mcif_wb; |
269 | struct mcif_warmup_params warmup_params = {0}; |
270 | unsigned int i, i_buf; |
271 | /*make sure there is no active DWB eanbled */ |
272 | for (i = 0; i < num_dwb; i++) { |
273 | dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst]; |
274 | if (dwb->dwb_is_efc_transition || dwb->dwb_is_drc) { |
275 | /*can not do warmup while any dwb enabled*/ |
276 | return false0; |
277 | } |
278 | } |
279 | |
280 | if (wb_info->mcif_warmup_params.p_vmid == 0) |
281 | return false0; |
282 | |
283 | /*check whether this is new interface: warmup big buffer once*/ |
284 | if (wb_info->mcif_warmup_params.start_address.quad_part != 0 && |
285 | wb_info->mcif_warmup_params.region_size != 0) { |
286 | /*mmhubbub is shared, so it does not matter which MCIF*/ |
287 | mcif_wb = dc->res_pool->mcif_wb[0]; |
288 | /*warmup a big chunk of VM buffer at once*/ |
289 | warmup_params.start_address.quad_part = wb_info->mcif_warmup_params.start_address.quad_part; |
290 | warmup_params.address_increment = wb_info->mcif_warmup_params.region_size; |
291 | warmup_params.region_size = wb_info->mcif_warmup_params.region_size; |
292 | warmup_params.p_vmid = wb_info->mcif_warmup_params.p_vmid; |
293 | |
294 | if (warmup_params.address_increment == 0) |
295 | warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes; |
296 | |
297 | mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params); |
298 | return true1; |
299 | } |
300 | /*following is the original: warmup each DWB's mcif buffer*/ |
301 | for (i = 0; i < num_dwb; i++) { |
302 | dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst]; |
Value stored to 'dwb' is never read | |
303 | mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst]; |
304 | /*warmup is for VM mode only*/ |
305 | if (wb_info[i].mcif_buf_params.p_vmid == 0) |
306 | return false0; |
307 | |
308 | /* Warmup MCIF_WB */ |
309 | for (i_buf = 0; i_buf < MCIF_BUF_COUNT4; i_buf++) { |
310 | warmup_params.start_address.quad_part = wb_info[i].mcif_buf_params.luma_address[i_buf]; |
311 | warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes; |
312 | warmup_params.region_size = wb_info[i].mcif_buf_params.luma_pitch * wb_info[i].dwb_params.dest_height; |
313 | warmup_params.p_vmid = wb_info[i].mcif_buf_params.p_vmid; |
314 | mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params); |
315 | } |
316 | } |
317 | return true1; |
318 | } |
319 | |
320 | void dcn30_enable_writeback( |
321 | struct dc *dc, |
322 | struct dc_writeback_info *wb_info, |
323 | struct dc_state *context) |
324 | { |
325 | struct dwbc *dwb; |
326 | struct mcif_wb *mcif_wb; |
327 | struct timing_generator *optc; |
328 | |
329 | dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; |
330 | mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; |
331 | |
332 | /* set the OPTC source mux */ |
333 | optc = dc->res_pool->timing_generators[dwb->otg_inst]; |
334 | DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\___drm_dbg(((void *)0), DRM_UT_KMS, "%s dwb_pipe_inst = %d, mpcc_inst = %d" , __func__, wb_info->dwb_pipe_inst, wb_info->mpcc_inst) |
335 | __func__, wb_info->dwb_pipe_inst,\___drm_dbg(((void *)0), DRM_UT_KMS, "%s dwb_pipe_inst = %d, mpcc_inst = %d" , __func__, wb_info->dwb_pipe_inst, wb_info->mpcc_inst) |
336 | wb_info->mpcc_inst)___drm_dbg(((void *)0), DRM_UT_KMS, "%s dwb_pipe_inst = %d, mpcc_inst = %d" , __func__, wb_info->dwb_pipe_inst, wb_info->mpcc_inst); |
337 | if (IS_DIAG_DC(dc->ctx->dce_environment)((dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) || ( dc->ctx->dce_environment == DCE_ENV_DIAG))) { |
338 | /*till diags switch to warmup interface*/ |
339 | dcn30_mmhubbub_warmup(dc, 1, wb_info); |
340 | } |
341 | /* Update writeback pipe */ |
342 | dcn30_set_writeback(dc, wb_info, context); |
343 | |
344 | /* Enable MCIF_WB */ |
345 | mcif_wb->funcs->enable_mcif(mcif_wb); |
346 | /* Enable DWB */ |
347 | dwb->funcs->enable(dwb, &wb_info->dwb_params); |
348 | } |
349 | |
350 | void dcn30_disable_writeback( |
351 | struct dc *dc, |
352 | unsigned int dwb_pipe_inst) |
353 | { |
354 | struct dwbc *dwb; |
355 | struct mcif_wb *mcif_wb; |
356 | |
357 | ASSERT(dwb_pipe_inst < MAX_DWB_PIPES)do { if (({ static int __warned; int __ret = !!(!(dwb_pipe_inst < 1)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n" , "!(dwb_pipe_inst < 1)", "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 357); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
358 | dwb = dc->res_pool->dwbc[dwb_pipe_inst]; |
359 | mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst]; |
360 | DC_LOG_DWB("%s dwb_pipe_inst = %d",\___drm_dbg(((void *)0), DRM_UT_KMS, "%s dwb_pipe_inst = %d", __func__ , dwb_pipe_inst) |
361 | __func__, dwb_pipe_inst)___drm_dbg(((void *)0), DRM_UT_KMS, "%s dwb_pipe_inst = %d", __func__ , dwb_pipe_inst); |
362 | |
363 | /* disable DWB */ |
364 | dwb->funcs->disable(dwb); |
365 | /* disable MCIF */ |
366 | mcif_wb->funcs->disable_mcif(mcif_wb); |
367 | /* disable MPC DWB mux */ |
368 | dc->res_pool->mpc->funcs->disable_dwb_mux(dc->res_pool->mpc, dwb_pipe_inst); |
369 | } |
370 | |
371 | void dcn30_program_all_writeback_pipes_in_tree( |
372 | struct dc *dc, |
373 | const struct dc_stream_state *stream, |
374 | struct dc_state *context) |
375 | { |
376 | struct dc_writeback_info wb_info; |
377 | struct dwbc *dwb; |
378 | struct dc_stream_status *stream_status = NULL((void *)0); |
379 | int i_wb, i_pipe, i_stream; |
380 | DC_LOG_DWB("%s", __func__)___drm_dbg(((void *)0), DRM_UT_KMS, "%s", __func__); |
381 | |
382 | ASSERT(stream)do { if (({ static int __warned; int __ret = !!(!(stream)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n" , "!(stream)", "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 382); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
383 | for (i_stream = 0; i_stream < context->stream_count; i_stream++) { |
384 | if (context->streams[i_stream] == stream) { |
385 | stream_status = &context->stream_status[i_stream]; |
386 | break; |
387 | } |
388 | } |
389 | ASSERT(stream_status)do { if (({ static int __warned; int __ret = !!(!(stream_status )); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n" , "!(stream_status)", "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 389); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
390 | |
391 | ASSERT(stream->num_wb_info <= dc->res_pool->res_cap->num_dwb)do { if (({ static int __warned; int __ret = !!(!(stream-> num_wb_info <= dc->res_pool->res_cap->num_dwb)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n" , "!(stream->num_wb_info <= dc->res_pool->res_cap->num_dwb)" , "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 391); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
392 | /* For each writeback pipe */ |
393 | for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) { |
394 | |
395 | /* copy writeback info to local non-const so mpcc_inst can be set */ |
396 | wb_info = stream->writeback_info[i_wb]; |
397 | if (wb_info.wb_enabled) { |
398 | |
399 | /* get the MPCC instance for writeback_source_plane */ |
400 | wb_info.mpcc_inst = -1; |
401 | for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) { |
402 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe]; |
403 | |
404 | if (!pipe_ctx->plane_state) |
405 | continue; |
406 | |
407 | if (pipe_ctx->plane_state == wb_info.writeback_source_plane) { |
408 | wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst; |
409 | break; |
410 | } |
411 | } |
412 | |
413 | if (wb_info.mpcc_inst == -1) { |
414 | /* Disable writeback pipe and disconnect from MPCC |
415 | * if source plane has been removed |
416 | */ |
417 | dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst); |
418 | continue; |
419 | } |
420 | |
421 | ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb)do { if (({ static int __warned; int __ret = !!(!(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n", "!(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb)" , "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 421); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
422 | dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst]; |
423 | if (dwb->funcs->is_enabled(dwb)) { |
424 | /* writeback pipe already enabled, only need to update */ |
425 | dc->hwss.update_writeback(dc, &wb_info, context); |
426 | } else { |
427 | /* Enable writeback pipe and connect to MPCC */ |
428 | dc->hwss.enable_writeback(dc, &wb_info, context); |
429 | } |
430 | } else { |
431 | /* Disable writeback pipe and disconnect from MPCC */ |
432 | dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst); |
433 | } |
434 | } |
435 | } |
436 | |
437 | void dcn30_init_hw(struct dc *dc) |
438 | { |
439 | struct abm **abms = dc->res_pool->multiple_abms; |
440 | struct dce_hwseq *hws = dc->hwseq; |
441 | struct dc_bios *dcb = dc->ctx->dc_bios; |
442 | struct resource_pool *res_pool = dc->res_pool; |
443 | int i; |
444 | int edp_num; |
445 | uint32_t backlight = MAX_BACKLIGHT_LEVEL0xFFFF; |
446 | |
447 | if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) |
448 | dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); |
449 | |
450 | // Initialize the dccg |
451 | if (res_pool->dccg->funcs->dccg_init) |
452 | res_pool->dccg->funcs->dccg_init(res_pool->dccg); |
453 | |
454 | if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)(dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS)) { |
455 | |
456 | REG_WRITE(REFCLK_CNTL, 0)dm_write_reg_func(hws->ctx, hws->regs->REFCLK_CNTL, 0 , __func__); |
457 | REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1)generic_reg_update_ex(hws->ctx, hws->regs->DCHUBBUB_GLOBAL_TIMER_CNTL , 1, hws->shifts->DCHUBBUB_GLOBAL_TIMER_ENABLE, hws-> masks->DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); |
458 | REG_WRITE(DIO_MEM_PWR_CTRL, 0)dm_write_reg_func(hws->ctx, hws->regs->DIO_MEM_PWR_CTRL , 0, __func__); |
459 | |
460 | if (!dc->debug.disable_clock_gate) { |
461 | /* enable all DCN clock gating */ |
462 | REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0)dm_write_reg_func(hws->ctx, hws->regs->DCCG_GATE_DISABLE_CNTL , 0, __func__); |
463 | |
464 | REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0)dm_write_reg_func(hws->ctx, hws->regs->DCCG_GATE_DISABLE_CNTL2 , 0, __func__); |
465 | |
466 | REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0)generic_reg_update_ex(hws->ctx, hws->regs->DCFCLK_CNTL , 1, hws->shifts->DCFCLK_GATE_DIS, hws->masks->DCFCLK_GATE_DIS , 0); |
467 | } |
468 | |
469 | //Enable ability to power gate / don't force power on permanently |
470 | if (hws->funcs.enable_power_gating_plane) |
471 | hws->funcs.enable_power_gating_plane(hws, true1); |
472 | |
473 | return; |
474 | } |
475 | |
476 | if (!dcb->funcs->is_accelerated_mode(dcb)) { |
477 | hws->funcs.bios_golden_init(dc); |
478 | hws->funcs.disable_vga(dc->hwseq); |
479 | } |
480 | |
481 | if (dc->debug.enable_mem_low_power.bits.dmcu) { |
482 | // Force ERAM to shutdown if DMCU is not enabled |
483 | if (dc->debug.disable_dmcu || dc->config.disable_dmcu) { |
484 | REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3)generic_reg_update_ex(hws->ctx, hws->regs->DMU_MEM_PWR_CNTL , 1, hws->shifts->DMCU_ERAM_MEM_PWR_FORCE, hws->masks ->DMCU_ERAM_MEM_PWR_FORCE, 3); |
485 | } |
486 | } |
487 | |
488 | // Set default OPTC memory power states |
489 | if (dc->debug.enable_mem_low_power.bits.optc) { |
490 | // Shutdown when unassigned and light sleep in VBLANK |
491 | REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1)generic_reg_set_ex(hws->ctx, hws->regs->ODM_MEM_PWR_CTRL3 , 0, 2, hws->shifts->ODM_MEM_UNASSIGNED_PWR_MODE, hws-> masks->ODM_MEM_UNASSIGNED_PWR_MODE, 3, hws->shifts-> ODM_MEM_VBLANK_PWR_MODE, hws->masks->ODM_MEM_VBLANK_PWR_MODE , 1); |
492 | } |
493 | |
494 | if (dc->ctx->dc_bios->fw_info_valid) { |
495 | res_pool->ref_clocks.xtalin_clock_inKhz = |
496 | dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; |
497 | |
498 | if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)(dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS)) { |
499 | if (res_pool->dccg && res_pool->hubbub) { |
500 | |
501 | (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, |
502 | dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, |
503 | &res_pool->ref_clocks.dccg_ref_clock_inKhz); |
504 | |
505 | (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, |
506 | res_pool->ref_clocks.dccg_ref_clock_inKhz, |
507 | &res_pool->ref_clocks.dchub_ref_clock_inKhz); |
508 | } else { |
509 | // Not all ASICs have DCCG sw component |
510 | res_pool->ref_clocks.dccg_ref_clock_inKhz = |
511 | res_pool->ref_clocks.xtalin_clock_inKhz; |
512 | res_pool->ref_clocks.dchub_ref_clock_inKhz = |
513 | res_pool->ref_clocks.xtalin_clock_inKhz; |
514 | } |
515 | } |
516 | } else |
517 | ASSERT_CRITICAL(false)do { if (({ int __ret = !!(!(0)); if (__ret) printf("WARNING %s failed at %s:%d\n" , "!(0)", "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 517); __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
518 | |
519 | for (i = 0; i < dc->link_count; i++) { |
520 | /* Power up AND update implementation according to the |
521 | * required signal (which may be different from the |
522 | * default signal on connector). |
523 | */ |
524 | struct dc_link *link = dc->links[i]; |
525 | |
526 | link->link_enc->funcs->hw_init(link->link_enc); |
527 | |
528 | /* Check for enabled DIG to identify enabled display */ |
529 | if (link->link_enc->funcs->is_dig_enabled && |
530 | link->link_enc->funcs->is_dig_enabled(link->link_enc)) { |
531 | link->link_status.link_active = true1; |
532 | if (link->link_enc->funcs->fec_is_active && |
533 | link->link_enc->funcs->fec_is_active(link->link_enc)) |
534 | link->fec_state = dc_link_fec_enabled; |
535 | } |
536 | } |
537 | |
538 | /* Power gate DSCs */ |
539 | for (i = 0; i < res_pool->res_cap->num_dsc; i++) |
540 | if (hws->funcs.dsc_pg_control != NULL((void *)0)) |
541 | hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false0); |
542 | |
543 | /* we want to turn off all dp displays before doing detection */ |
544 | dc_link_blank_all_dp_displays(dc); |
545 | |
546 | if (hws->funcs.enable_power_gating_plane) |
547 | hws->funcs.enable_power_gating_plane(dc->hwseq, true1); |
548 | |
549 | /* If taking control over from VBIOS, we may want to optimize our first |
550 | * mode set, so we need to skip powering down pipes until we know which |
551 | * pipes we want to use. |
552 | * Otherwise, if taking control is not possible, we need to power |
553 | * everything down. |
554 | */ |
555 | if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { |
556 | hws->funcs.init_pipes(dc, dc->current_state); |
557 | if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) |
558 | dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, |
559 | !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); |
560 | } |
561 | |
562 | /* In headless boot cases, DIG may be turned |
563 | * on which causes HW/SW discrepancies. |
564 | * To avoid this, power down hardware on boot |
565 | * if DIG is turned on and seamless boot not enabled |
566 | */ |
567 | if (!dc->config.seamless_boot_edp_requested) { |
568 | struct dc_link *edp_links[MAX_NUM_EDP2]; |
569 | struct dc_link *edp_link = NULL((void *)0); |
570 | |
571 | get_edp_links(dc, edp_links, &edp_num); |
572 | if (edp_num) |
573 | edp_link = edp_links[0]; |
574 | if (edp_link && edp_link->link_enc->funcs->is_dig_enabled && |
575 | edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && |
576 | dc->hwss.edp_backlight_control && |
577 | dc->hwss.power_down && |
578 | dc->hwss.edp_power_control) { |
579 | dc->hwss.edp_backlight_control(edp_link, false0); |
580 | dc->hwss.power_down(dc); |
581 | dc->hwss.edp_power_control(edp_link, false0); |
582 | } else { |
583 | for (i = 0; i < dc->link_count; i++) { |
584 | struct dc_link *link = dc->links[i]; |
585 | |
586 | if (link->link_enc->funcs->is_dig_enabled && |
587 | link->link_enc->funcs->is_dig_enabled(link->link_enc) && |
588 | dc->hwss.power_down) { |
589 | dc->hwss.power_down(dc); |
590 | break; |
591 | } |
592 | |
593 | } |
594 | } |
595 | } |
596 | |
597 | for (i = 0; i < res_pool->audio_count; i++) { |
598 | struct audio *audio = res_pool->audios[i]; |
599 | |
600 | audio->funcs->hw_init(audio); |
601 | } |
602 | |
603 | for (i = 0; i < dc->link_count; i++) { |
604 | struct dc_link *link = dc->links[i]; |
605 | |
606 | if (link->panel_cntl) |
607 | backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); |
608 | } |
609 | |
610 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
611 | if (abms[i] != NULL((void *)0)) |
612 | abms[i]->funcs->abm_init(abms[i], backlight); |
613 | } |
614 | |
615 | /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ |
616 | REG_WRITE(DIO_MEM_PWR_CTRL, 0)dm_write_reg_func(hws->ctx, hws->regs->DIO_MEM_PWR_CTRL , 0, __func__); |
617 | |
618 | if (!dc->debug.disable_clock_gate) { |
619 | /* enable all DCN clock gating */ |
620 | REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0)dm_write_reg_func(hws->ctx, hws->regs->DCCG_GATE_DISABLE_CNTL , 0, __func__); |
621 | |
622 | REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0)dm_write_reg_func(hws->ctx, hws->regs->DCCG_GATE_DISABLE_CNTL2 , 0, __func__); |
623 | |
624 | REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0)generic_reg_update_ex(hws->ctx, hws->regs->DCFCLK_CNTL , 1, hws->shifts->DCFCLK_GATE_DIS, hws->masks->DCFCLK_GATE_DIS , 0); |
625 | } |
626 | |
627 | if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) |
628 | dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); |
629 | |
630 | if (dc->clk_mgr->funcs->notify_wm_ranges) |
631 | dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); |
632 | |
633 | //if softmax is enabled then hardmax will be set by a different call |
634 | if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) |
635 | dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); |
636 | |
637 | if (dc->res_pool->hubbub->funcs->force_pstate_change_control) |
638 | dc->res_pool->hubbub->funcs->force_pstate_change_control( |
639 | dc->res_pool->hubbub, false0, false0); |
640 | if (dc->res_pool->hubbub->funcs->init_crb) |
641 | dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); |
642 | |
643 | // Get DMCUB capabilities |
644 | dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); |
645 | dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; |
646 | dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; |
647 | } |
648 | |
649 | void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool_Bool enable) |
650 | { |
651 | if (pipe_ctx == NULL((void *)0)) |
652 | return; |
653 | |
654 | if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL((void *)0)) |
655 | pipe_ctx->stream_res.stream_enc->funcs->set_avmute( |
656 | pipe_ctx->stream_res.stream_enc, |
657 | enable); |
658 | } |
659 | |
/*
 * dcn30_update_info_frame() - push the pre-built info packets to the
 * stream encoder.
 *
 * @pipe_ctx: pipe whose encoder_info_frame (already populated elsewhere)
 *            should be programmed into hardware.
 *
 * Routes to the HDMI or DP packet-update hook depending on the stream's
 * signal type; silently returns for non-root pipes (no stream encoder)
 * and for signal types that carry neither HDMI-TMDS nor DP info packets.
 */
660 | void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
661 | {
662 | 	bool_Bool is_hdmi_tmds;
663 | 	bool_Bool is_dp;
664 | 
665 | 	ASSERT(pipe_ctx->stream)do { if (({ static int __warned; int __ret = !!(!(pipe_ctx-> stream)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n" , "!(pipe_ctx->stream)", "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 665); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0);
666 | 
	/* Only the root pipe of an MPC/ODM tree owns a stream encoder. */
667 | 	if (pipe_ctx->stream_res.stream_enc == NULL((void *)0))
668 | 		return; /* this is not root pipe */
669 | 
670 | 	is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
671 | 	is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
672 | 
	/* Other signal types (e.g. DVI) carry no info packets to update. */
673 | 	if (!is_hdmi_tmds && !is_dp)
674 | 		return;
675 | 
676 | 	if (is_hdmi_tmds)
677 | 		pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
678 | 			pipe_ctx->stream_res.stream_enc,
679 | 			&pipe_ctx->stream_res.encoder_info_frame);
680 | 	else
681 | 		pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
682 | 			pipe_ctx->stream_res.stream_enc,
683 | 			&pipe_ctx->stream_res.encoder_info_frame);
684 | }
685 | |
/*
 * dcn30_program_dmdata_engine() - configure dynamic-metadata (HDR DM)
 * delivery for a pipe.
 *
 * @pipe_ctx: pipe to program.
 *
 * Enables the stream encoder's dynamic-metadata path when the stream has
 * a dmdata surface address set; in that case the generic HDR SMD info
 * packet is marked invalid so the two mechanisms don't both transmit
 * metadata. The transport mode (DP vs HDMI) follows the signal type.
 * No-op if the pipe lacks a hubp or the encoder lacks the hook.
 */
686 | void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
687 | {
688 | 	struct dc_stream_state *stream = pipe_ctx->stream;
689 | 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
690 | 	bool_Bool enable = false0;
691 | 	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
692 | 	enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
693 | 		? dmdata_dp
694 | 		: dmdata_hdmi;
695 | 
696 | 	/* if using dynamic meta, don't set up generic infopackets */
697 | 	if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
698 | 		pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false0;
699 | 		enable = true1;
700 | 	}
701 | 
	/* Metadata is fetched per-plane by the hubp; nothing to do without one. */
702 | 	if (!hubp)
703 | 		return;
704 | 
705 | 	if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
706 | 		return;
707 | 
708 | 	stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
709 | 		hubp->inst, mode);
710 | }
711 | |
/*
 * dcn30_apply_idle_power_optimizations() - enable or disable MALL-based
 * idle power optimizations via DMUB firmware commands.
 *
 * @dc:     display core instance (must have a DMUB service).
 * @enable: true to try to enter an idle-optimized state, false to exit.
 *
 * On enable, tries two strategies in order:
 *  1. "No DF requests" - if no stream has any visible plane, tell DMUB
 *     that display makes no memory requests at all.
 *  2. MALL frame caching - for a single non-PSR stream with a single
 *     eligible surface that fits in MALL, compute the frame-cache
 *     hysteresis timer (delay + scale) from the refresh rate and stutter
 *     period, optionally copy the HW cursor into the MALL cursor cache,
 *     and issue DMUB_CMD__MALL_ACTION_ALLOW.
 *
 * On disable, issues DMUB_CMD__MALL_ACTION_DISALLOW and waits for DMUB
 * to go idle.
 *
 * Return: true if a command was issued (optimization state changed),
 * false if no optimization was applicable or DMUB is unavailable.
 */
712 | bool_Bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool_Bool enable)
713 | {
714 | 	union dmub_rb_cmd cmd;
715 | 	uint32_t tmr_delay = 0, tmr_scale = 0;
716 | 	struct dc_cursor_attributes cursor_attr;
717 | 	bool_Bool cursor_cache_enable = false0;
718 | 	struct dc_stream_state *stream = NULL((void *)0);
719 | 	struct dc_plane_state *plane = NULL((void *)0);
720 | 
	/* All optimizations here are delegated to DMUB firmware. */
721 | 	if (!dc->ctx->dmub_srv)
722 | 		return false0;
723 | 
724 | 	if (enable) {
725 | 		if (dc->current_state) {
726 | 			int i;
727 | 
728 | 			/* First, check no-memory-requests case */
729 | 			for (i = 0; i < dc->current_state->stream_count; i++) {
730 | 				if (dc->current_state->stream_status[i].plane_count)
731 | 					/* Fail eligibility on a visible stream */
732 | 					break;
733 | 			}
734 | 
			/* Loop ran to completion: no stream has planes. */
735 | 			if (i == dc->current_state->stream_count) {
736 | 				/* Enable no-memory-requests case */
737 | 				memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
738 | 				cmd.mall.header.type = DMUB_CMD__MALL;
739 | 				cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
740 | 				cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
741 | 
742 | 				dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
743 | 				dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
744 | 
745 | 				return true1;
746 | 			}
747 | 
			/* Single-display MALL path below only inspects stream/plane 0. */
748 | 			stream = dc->current_state->streams[0];
749 | 			plane = (stream ? dc->current_state->stream_status[0].plane_states[0] : NULL((void *)0));
750 | 
751 | 			if (stream && plane) {
752 | 				cursor_cache_enable = stream->cursor_position.enable &&
753 | 						plane->address.grph.cursor_cache_addr.quad_part;
754 | 				cursor_attr = stream->cursor_attributes;
755 | 			}
756 | 
757 | 			/*
758 | 			 * Second, check MALL eligibility
759 | 			 *
760 | 			 * single display only, single surface only, 8 and 16 bit formats only, no VM,
761 | 			 * do not use MALL for displays that support PSR as they use D0i3.2 in DMCUB FW
762 | 			 *
763 | 			 * TODO: When we implement multi-display, PSR displays will be allowed if there is
764 | 			 * a non-PSR display present, since in that case we can't do D0i3.2
765 | 			 */
766 | 			if (dc->current_state->stream_count == 1 &&
767 | 					stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
768 | 					dc->current_state->stream_status[0].plane_count == 1 &&
769 | 					plane->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F &&
770 | 					plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 &&
771 | 					plane->address.page_table_base.quad_part == 0 &&
772 | 					dc->hwss.does_plane_fit_in_mall &&
773 | 					dc->hwss.does_plane_fit_in_mall(dc, plane,
774 | 							cursor_cache_enable ? &cursor_attr : NULL((void *)0))) {
				/* Prefer v_total_max (DRR upper bound) when set. */
775 | 				unsigned int v_total = stream->adjust.v_total_max ?
776 | 						stream->adjust.v_total_max : stream->timing.v_total;
777 | 				unsigned int refresh_hz = div_u64((unsigned long long) stream->timing.pix_clk_100hz *
778 | 						100LL, (v_total * stream->timing.h_total));
779 | 
780 | 				/*
781 | 				 * one frame time in microsec:
782 | 				 * Delay_Us = 1000000 / refresh
783 | 				 * dynamic_delay_us = 1000000 / refresh + 2 * stutter_period
784 | 				 *
785 | 				 * one frame time modified by 'additional timer percent' (p):
786 | 				 * Delay_Us_modified = dynamic_delay_us + dynamic_delay_us * p / 100
787 | 				 *                   = dynamic_delay_us * (1 + p / 100)
788 | 				 *                   = (1000000 / refresh + 2 * stutter_period) * (100 + p) / 100
789 | 				 *                   = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh)
790 | 				 *
791 | 				 * formula for timer duration based on parameters, from regspec:
792 | 				 * dynamic_delay_us = 65.28 * (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
793 | 				 *
794 | 				 * dynamic_delay_us / 65.28 = (64 + MallFrameCacheTmrDly) * 2^MallFrameCacheTmrScale
795 | 				 * (dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale = 64 + MallFrameCacheTmrDly
796 | 				 * MallFrameCacheTmrDly = ((dynamic_delay_us / 65.28) / 2^MallFrameCacheTmrScale) - 64
797 | 				 *                      = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (100 * refresh) / 65.28 / 2^MallFrameCacheTmrScale - 64
798 | 				 *                      = (1000000 + 2 * stutter_period * refresh) * (100 + p) / (refresh * 6528 * 2^MallFrameCacheTmrScale) - 64
799 | 				 *
800 | 				 * need to round up the result of the division before the subtraction
801 | 				 */
802 | 				unsigned int denom = refresh_hz * 6528;
803 | 				unsigned int stutter_period = dc->current_state->perf_params.stutter_period_us;
804 | 
				/* Round-up division (+ denom - 1) per the note above. */
805 | 				tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
806 | 						(100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
807 | 						denom) - 64LL;
808 | 
809 | 				/* In some cases the stutter period is really big (tiny modes) in these
810 | 				 * cases MALL cant be enabled, So skip these cases to avoid a ASSERT()
811 | 				 *
812 | 				 * We can check if stutter_period is more than 1/10th the frame time to
813 | 				 * consider if we can actually meet the range of hysteresis timer
814 | 				 */
815 | 				if (stutter_period > 100000/refresh_hz)
816 | 					return false0;
817 | 
818 | 				/* scale should be increased until it fits into 6 bits */
819 | 				while (tmr_delay & ~0x3F) {
820 | 					tmr_scale++;
821 | 
822 | 					if (tmr_scale > 3) {
823 | 						/* Delay exceeds range of hysteresis timer */
824 | 						ASSERT(false)do { if (({ static int __warned; int __ret = !!(!(0)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n" , "!(0)", "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn30/dcn30_hwseq.c" , 824); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0);
825 | 						return false0;
826 | 					}
827 | 
					/* Doubling denom halves tmr_delay per scale step. */
828 | 					denom *= 2;
829 | 					tmr_delay = div_u64(((1000000LL + 2 * stutter_period * refresh_hz) *
830 | 							(100LL + dc->debug.mall_additional_timer_percent) + denom - 1),
831 | 							denom) - 64LL;
832 | 				}
833 | 
834 | 				/* Copy HW cursor */
835 | 				if (cursor_cache_enable) {
836 | 					memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
837 | 					cmd.mall.header.type = DMUB_CMD__MALL;
838 | 					cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_COPY_CURSOR;
839 | 					cmd.mall.header.payload_bytes =
840 | 							sizeof(cmd.mall) - sizeof(cmd.mall.header);
841 | 
842 | 					switch (cursor_attr.color_format) {
843 | 					case CURSOR_MODE_MONO:
844 | 						cmd.mall.cursor_bpp = 2;
845 | 						break;
846 | 					case CURSOR_MODE_COLOR_1BIT_AND:
847 | 					case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
848 | 					case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
849 | 						cmd.mall.cursor_bpp = 32;
850 | 						break;
851 | 
852 | 					case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
853 | 					case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
854 | 						cmd.mall.cursor_bpp = 64;
855 | 						break;
856 | 					}
857 | 
858 | 					cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
					/* Destination must be 2KB aligned - round up. */
859 | 					cmd.mall.cursor_copy_dst.quad_part =
860 | 							(plane->address.grph.cursor_cache_addr.quad_part + 2047) & ~2047;
861 | 					cmd.mall.cursor_width = cursor_attr.width;
862 | 					cmd.mall.cursor_height = cursor_attr.height;
863 | 					cmd.mall.cursor_pitch = cursor_attr.pitch;
864 | 
865 | 					dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
866 | 					dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
867 | 					dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
868 | 
869 | 					/* Use copied cursor, and it's okay to not switch back */
870 | 					cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
871 | 					dc_stream_set_cursor_attributes(stream, &cursor_attr);
872 | 				}
873 | 
874 | 				/* Enable MALL */
875 | 				memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
876 | 				cmd.mall.header.type = DMUB_CMD__MALL;
877 | 				cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_ALLOW;
878 | 				cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
879 | 				cmd.mall.tmr_delay = tmr_delay;
880 | 				cmd.mall.tmr_scale = tmr_scale;
881 | 				cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
882 | 
883 | 				dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
884 | 				dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
885 | 
886 | 				return true1;
887 | 			}
888 | 		}
889 | 
890 | 		/* No applicable optimizations */
891 | 		return false0;
892 | 	}
893 | 
894 | 	/* Disable MALL */
895 | 	memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
896 | 	cmd.mall.header.type = DMUB_CMD__MALL;
897 | 	cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_DISALLOW;
898 | 	cmd.mall.header.payload_bytes =
899 | 		sizeof(cmd.mall) - sizeof(cmd.mall.header);
900 | 
901 | 	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
902 | 	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
	/* Block until disallow takes effect before returning to the caller. */
903 | 	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
904 | 
905 | 	return true1;
906 | }
907 | |
/*
 * dcn30_does_plane_fit_in_mall() - size check for MALL frame caching.
 *
 * @dc:          display core instance (supplies MALL capacity and debug
 *               override).
 * @plane:       surface to be cached.
 * @cursor_attr: optional HW cursor attributes; when non-NULL the cursor
 *               cache footprint is added to the surface footprint.
 *
 * Surface bytes = pitch * height * bytes-per-pixel, where bpp is 8 for
 * 16-bit-per-channel formats (>= ARGB16161616) and 4 otherwise.
 * Cursor bytes start from max_cursor_size^2 and are scaled by the
 * cursor color format's bit depth (mono: /2, 32bpp: *4, 64bpp FP: *8).
 *
 * Return: true if surface + cursor fit strictly below the MALL capacity
 * (debug.mall_size_override, in MB, replaces the reported total if set).
 */
908 | bool_Bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr)
909 | {
910 | 	// add meta size?
911 | 	unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height *
912 | 			(plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
913 | 	unsigned int mall_size = dc->caps.mall_size_total;
914 | 	unsigned int cursor_size = 0;
915 | 
	/* Debug override is specified in MB. */
916 | 	if (dc->debug.mall_size_override)
917 | 		mall_size = 1024 * 1024 * dc->debug.mall_size_override;
918 | 
919 | 	if (cursor_attr) {
920 | 		cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
921 | 
922 | 		switch (cursor_attr->color_format) {
923 | 		case CURSOR_MODE_MONO:
924 | 			cursor_size /= 2;
925 | 			break;
926 | 		case CURSOR_MODE_COLOR_1BIT_AND:
927 | 		case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
928 | 		case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
929 | 			cursor_size *= 4;
930 | 			break;
931 | 
932 | 		case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
933 | 		case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
934 | 			cursor_size *= 8;
935 | 			break;
936 | 		}
937 | 	}
938 | 
939 | 	return (surface_size + cursor_size) < mall_size;
940 | }
941 | |
/*
 * dcn30_hardware_release() - quiesce FW-assisted P-State features before
 * the driver releases the hardware.
 *
 * @dc: display core instance.
 *
 * Revokes the P-State delegation to DMUB and tears down any SubVP
 * configuration, then scans the current pipes for an active SubVP main
 * stream. If P-State switching is unsupported, SubVP is in use, or
 * firmware-based mclk switching is active, force P-State "supported" on
 * the DCN side so the hardware does not hang waiting for a P-State
 * allow after the driver lets go.
 */
942 | void dcn30_hardware_release(struct dc *dc)
943 | {
944 | 	bool_Bool subvp_in_use = false0;
945 | 	uint32_t i;
946 | 
	/* Take P-State control back from firmware and disable SubVP cmds. */
947 | 	dc_dmub_srv_p_state_delegate(dc, false0, NULL((void *)0));
948 | 	dc_dmub_setup_subvp_dmub_command(dc, dc->current_state, false0);
949 | 
950 | 	/* SubVP treated the same way as FPO. If driver disable and
951 | 	 * we are using a SubVP config, disable and force on DCN side
952 | 	 * to prevent P-State hang on driver enable.
953 | 	 */
954 | 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
955 | 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
956 | 
957 | 		if (!pipe->stream)
958 | 			continue;
959 | 
960 | 		if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
961 | 			subvp_in_use = true1;
962 | 			break;
963 | 		}
964 | 	}
965 | 	/* If pstate unsupported, or still supported
966 | 	 * by firmware, force it supported by dcn
967 | 	 */
968 | 	if (dc->current_state)
969 | 		if ((!dc->clk_mgr->clks.p_state_change_support || subvp_in_use ||
970 | 				dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) &&
971 | 				dc->res_pool->hubbub->funcs->force_pstate_change_control)
972 | 			dc->res_pool->hubbub->funcs->force_pstate_change_control(
973 | 					dc->res_pool->hubbub, true1, true1);
974 | }
975 | |
/*
 * dcn30_set_disp_pattern_generator() - program the OPP's display test
 * pattern generator for a pipe.
 *
 * @dc:           display core instance (unused here; kept for the common
 *                hwss function signature).
 * @pipe_ctx:     pipe whose OPP will generate the pattern.
 * @test_pattern: pattern type (e.g. color ramp, solid color, video mode).
 * @color_space:  color space the pattern is generated in.
 * @color_depth:  component bit depth of the generated pattern.
 * @solid_color:  color to use for solid-color patterns.
 * @width:        pattern width in pixels.
 * @height:       pattern height in pixels.
 * @offset:       horizontal offset of the pattern region.
 *
 * Thin wrapper that forwards all parameters to the OPP hardware hook.
 */
976 | void dcn30_set_disp_pattern_generator(const struct dc *dc,
977 | 		struct pipe_ctx *pipe_ctx,
978 | 		enum controller_dp_test_pattern test_pattern,
979 | 		enum controller_dp_color_space color_space,
980 | 		enum dc_color_depth color_depth,
981 | 		const struct tg_color *solid_color,
982 | 		int width, int height, int offset)
983 | {
984 | 	pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
985 | 			color_space, color_depth, solid_color, width, height, offset);
986 | }
987 | |
/*
 * dcn30_prepare_bandwidth() - raise clocks/watermarks ahead of a mode or
 * plane change.
 *
 * @dc:      display core instance.
 * @context: new validated state whose bandwidth requirements must be met.
 *
 * If DC-mode softmax is enabled and the incoming state needs more DRAM
 * clock than the softmax cap (while the current clock is still at or
 * below it), lift the memclk hard cap to the highest entry in the clock
 * table first. Then run the common dcn20 bandwidth preparation and
 * (re)delegate P-State control to DMUB according to whether the new
 * state uses firmware-based mclk switching.
 */
988 | void dcn30_prepare_bandwidth(struct dc *dc,
989 | 	struct dc_state *context)
990 | {
	/* softmax values are in MHz; clks are in kHz, hence the * 1000. */
991 | 	if (dc->clk_mgr->dc_mode_softmax_enabled)
992 | 		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
993 | 				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
994 | 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
995 | 
996 | 	dcn20_prepare_bandwidth(dc, context);
997 | 
998 | 	dc_dmub_srv_p_state_delegate(dc,
999 | 		context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
1000 | }
1001 |