File: | dev/pci/drm/amd/display/dc/dcn32/dcn32_hwseq.c |
Warning: | line 306, column 6: Value stored to 'cursor_bpp' is never read |
1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: AMD |
23 | * |
24 | */ |
25 | |
26 | |
27 | #include "dm_services.h" |
28 | #include "dm_helpers.h" |
29 | #include "core_types.h" |
30 | #include "resource.h" |
31 | #include "dccg.h" |
32 | #include "dce/dce_hwseq.h" |
33 | #include "dcn30/dcn30_cm_common.h" |
34 | #include "reg_helper.h" |
35 | #include "abm.h" |
36 | #include "hubp.h" |
37 | #include "dchubbub.h" |
38 | #include "timing_generator.h" |
39 | #include "opp.h" |
40 | #include "ipp.h" |
41 | #include "mpc.h" |
42 | #include "mcif_wb.h" |
43 | #include "dc_dmub_srv.h" |
44 | #include "link_hwss.h" |
45 | #include "dpcd_defs.h" |
46 | #include "dcn32_hwseq.h" |
47 | #include "clk_mgr.h" |
48 | #include "dsc.h" |
49 | #include "dcn20/dcn20_optc.h" |
50 | #include "dmub_subvp_state.h" |
51 | #include "dce/dmub_hw_lock_mgr.h" |
52 | #include "dcn32_resource.h" |
53 | #include "dc_link_dp.h" |
54 | #include "dmub/inc/dmub_subvp_state.h" |
55 | |
56 | #define DC_LOGGER_INIT(logger) |
57 | |
58 | #define CTX \ |
59 | hws->ctx |
60 | #define REG(reg)\ |
61 | hws->regs->reg |
62 | #define DC_LOGGER \ |
63 | dc->ctx->logger |
64 | |
65 | |
66 | #undef FN |
67 | #define FN(reg_name, field_name) \ |
68 | hws->shifts->field_name, hws->masks->field_name |
69 | |
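/* Gate or ungate the power domain for one DSC instance (DOMAIN16..19 |
 * map to DSC0..3 here) and poll the PGFSM status until it reports the |
 * requested state. Skipped entirely under either debug override below. |
 */ |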
70 | void dcn32_dsc_pg_control( |
71 | struct dce_hwseq *hws, |
72 | unsigned int dsc_inst, |
73 | bool power_on) |
74 | { |
75 | uint32_t power_gate = power_on ? 0 : 1; |
76 | uint32_t pwr_status = power_on ? 0 : 2; |
77 | uint32_t org_ip_request_cntl = 0; |
78 | |
79 | if (hws->ctx->dc->debug.disable_dsc_power_gate) |
80 | return; |
81 | |
82 | if (!hws->ctx->dc->debug.enable_double_buffered_dsc_pg_support) |
83 | return; |
84 | |
85 | REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); |
86 | if (org_ip_request_cntl == 0) |
87 | REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); |
88 |  |
89 | switch (dsc_inst) { |
90 | case 0: /* DSC0 */ |
91 | REG_UPDATE(DOMAIN16_PG_CONFIG, |
92 | DOMAIN_POWER_GATE, power_gate); |
93 |  |
94 | REG_WAIT(DOMAIN16_PG_STATUS, |
95 | DOMAIN_PGFSM_PWR_STATUS, pwr_status, |
96 | 1, 1000); |
97 | break; |
98 | case 1: /* DSC1 */ |
99 | REG_UPDATE(DOMAIN17_PG_CONFIG, |
100 | DOMAIN_POWER_GATE, power_gate); |
101 |  |
102 | REG_WAIT(DOMAIN17_PG_STATUS, |
103 | DOMAIN_PGFSM_PWR_STATUS, pwr_status, |
104 | 1, 1000); |
105 | break; |
106 | case 2: /* DSC2 */ |
107 | REG_UPDATE(DOMAIN18_PG_CONFIG, |
108 | DOMAIN_POWER_GATE, power_gate); |
109 |  |
110 | REG_WAIT(DOMAIN18_PG_STATUS, |
111 | DOMAIN_PGFSM_PWR_STATUS, pwr_status, |
112 | 1, 1000); |
113 | break; |
114 | case 3: /* DSC3 */ |
115 | REG_UPDATE(DOMAIN19_PG_CONFIG, |
116 | DOMAIN_POWER_GATE, power_gate); |
117 |  |
118 | REG_WAIT(DOMAIN19_PG_STATUS, |
119 | DOMAIN_PGFSM_PWR_STATUS, pwr_status, |
120 | 1, 1000); |
121 | break; |
122 | default: |
123 | BREAK_TO_DEBUGGER(); |
124 | break; |
125 | } |
126 |  |
127 | if (org_ip_request_cntl == 0) |
128 | REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); |
129 | } |
130 | |
131 | |
132 | void dcn32_enable_power_gating_plane( |
133 | struct dce_hwseq *hws, |
134 | bool enable) |
135 | { |
136 | bool force_on = true; /* disable power gating */ |
137 |  |
138 | if (enable) |
139 | force_on = false; |
140 |  |
141 | /* DCHUBP0/1/2/3 */ |
142 | REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); |
143 | REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); |
144 | REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); |
145 | REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); |
146 |  |
147 | /* DCS0/1/2/3 */ |
148 | REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); |
149 | REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); |
150 | REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); |
151 | REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on); |
152 | } |
153 | |
154 | void dcn32_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) |
155 | { |
156 | uint32_t power_gate = power_on ? 0 : 1; |
157 | uint32_t pwr_status = power_on ? 0 : 2; |
158 |  |
159 | if (hws->ctx->dc->debug.disable_hubp_power_gate) |
160 | return; |
161 |  |
162 | if (REG(DOMAIN0_PG_CONFIG) == 0) |
163 | return; |
164 |  |
165 | switch (hubp_inst) { |
166 | case 0: |
167 | REG_SET(DOMAIN0_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); |
168 | REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); |
169 | break; |
170 | case 1: |
171 | REG_SET(DOMAIN1_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); |
172 | REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); |
173 | break; |
174 | case 2: |
175 | REG_SET(DOMAIN2_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); |
176 | REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); |
177 | break; |
178 | case 3: |
179 | REG_SET(DOMAIN3_PG_CONFIG, 0, DOMAIN_POWER_GATE, power_gate); |
180 | REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000); |
181 | break; |
182 | default: |
183 | BREAK_TO_DEBUGGER(); |
184 | break; |
185 | } |
186 | } |
187 | |
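/* CAB qualifies for the no-memory-request case only when no stream in |
 * the current state has a visible plane. |
 */ |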
188 | static bool dcn32_check_no_memory_request_for_cab(struct dc *dc) |
189 | { |
190 | int i; |
191 | |
192 | /* First, check no-memory-request case */ |
193 | for (i = 0; i < dc->current_state->stream_count; i++) { |
194 | if (dc->current_state->stream_status[i].plane_count) |
195 | /* Fail eligibility on a visible stream */ |
196 | break; |
197 | } |
198 | |
199 | if (i == dc->current_state->stream_count) |
200 | return true; |
201 |  |
202 | return false; |
203 | } |
204 | |
205 | |
206 | /* This function loops through every surface that needs to be cached in CAB for SS, |
207 | * and calculates the total number of ways required to store all surfaces (primary, |
208 | * meta, cursor). |
209 | */ |
210 | static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx) |
211 | { |
212 | int i, j; |
213 | struct dc_stream_state *stream = NULL; |
214 | struct dc_plane_state *plane = NULL; |
215 | uint32_t cursor_size = 0; |
216 | uint32_t total_lines = 0; |
217 | uint32_t lines_per_way = 0; |
218 | uint8_t num_ways = 0; |
219 | uint8_t bytes_per_pixel = 0; |
220 | uint8_t cursor_bpp = 0; |
221 | uint16_t mblk_width = 0; |
222 | uint16_t mblk_height = 0; |
223 | uint16_t mall_alloc_width_blk_aligned = 0; |
224 | uint16_t mall_alloc_height_blk_aligned = 0; |
225 | uint16_t num_mblks = 0; |
226 | uint32_t bytes_in_mall = 0; |
227 | uint32_t cache_lines_used = 0; |
228 | uint32_t cache_lines_per_plane = 0; |
229 | |
230 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
231 | struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; |
232 | |
233 | if (!pipe->stream || !pipe->plane_state || |
234 | pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED || |
235 | pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) |
236 | continue; |
237 | |
238 | bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; |
239 | mblk_width = DCN3_2_MBLK_WIDTH; |
240 | mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE; |
241 | |
242 | /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) - |
243 | * FLOOR(vp_x_start, blk_width) |
244 | * |
245 | * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c |
246 | */ |
247 | mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x + |
248 | pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) - |
249 | (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width); |
250 | |
251 | /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) - |
252 | * FLOOR(vp_y_start, blk_height) |
253 | * |
254 | * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c |
255 | */ |
256 | mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y + |
257 | pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) - |
258 | (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height); |
259 | |
260 | num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) * |
261 | ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height); |
262 | |
263 | /* For DCC: |
264 | * meta_num_mblk = CEILING(full_mblk_width_ub_l*full_mblk_height_ub_l*Bpe/256/mblk_bytes, 1) |
265 | */ |
266 | if (pipe->plane_state->dcc.enable) |
267 | num_mblks += (mall_alloc_width_blk_aligned * mall_alloc_height_blk_aligned * bytes_per_pixel + |
268 | (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES); |
269 | |
270 | bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES; |
271 | |
272 | /* Cache lines used is total bytes / cache_line size. Add +2 for worst case alignment |
273 | * (MALL is 64-byte aligned) |
274 | */ |
275 | cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2; |
276 | cache_lines_used += cache_lines_per_plane; |
277 | } |
278 | |
279 | // Include cursor size for CAB allocation |
280 | for (j = 0; j < dc->res_pool->pipe_count; j++) { |
281 | struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j]; |
282 | struct hubp *hubp = pipe->plane_res.hubp; |
283 | |
284 | if (pipe->stream && pipe->plane_state && hubp) |
285 | /* Find the cursor plane and use the exact size instead of |
286 | using the max for calculation */ |
287 | |
288 | if (hubp->curs_attr.width > 0) { |
289 | cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; |
290 | |
291 | switch (pipe->stream->cursor_attributes.color_format) { |
292 | case CURSOR_MODE_MONO: |
293 | cursor_size /= 2; |
294 | cursor_bpp = 4; |
295 | break; |
296 | case CURSOR_MODE_COLOR_1BIT_AND: |
297 | case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: |
298 | case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: |
299 | cursor_size *= 4; |
300 | cursor_bpp = 4; |
301 | break; |
302 | |
303 | case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: |
304 | case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: |
305 | cursor_size *= 8; |
306 | cursor_bpp = 8; |
Value stored to 'cursor_bpp' is never read | |
307 | break; |
308 | } |
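/* On the analyzer warning above: cursor_bpp is assigned in every switch |
 * arm but never read again in this function, so the store at line 306 |
 * is dead. Only cursor_size feeds the cache-line math below; the |
 * variable could be removed or actually used in the size calculation. |
 */ |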
309 | |
310 | if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor && |
311 | cursor_size > 16384) { |
312 | /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1) |
313 | */ |
314 | cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / |
315 | DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) / |
316 | dc->caps.cache_line_size + 2; |
317 | } |
318 | break; |
319 | } |
320 | } |
321 | |
322 | // Convert number of cache lines required to number of ways |
323 | total_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; |
324 | lines_per_way = total_lines / dc->caps.cache_num_ways; |
325 | num_ways = cache_lines_used / lines_per_way; |
326 | |
327 | if (cache_lines_used % lines_per_way > 0) |
328 | num_ways++; |
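/* Worked example with illustrative numbers (not the real DCN3.2 caps): |
 * a 64 MiB CAB with 64-byte lines gives total_lines = 1,048,576; with |
 * 16 ways, lines_per_way = 65,536, so 70,000 used lines round up to |
 * num_ways = 2. |
 */ |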
329 | |
330 | for (i = 0; i < ctx->stream_count; i++) { |
331 | stream = ctx->streams[i]; |
332 | for (j = 0; j < ctx->stream_status[i].plane_count; j++) { |
333 | plane = ctx->stream_status[i].plane_states[j]; |
334 | |
335 | if (stream->cursor_position.enable && plane && |
336 | dc->debug.alloc_extra_way_for_cursor && |
337 | cursor_size > 16384) { |
338 | /* Cursor caching is not supported since it won't be on the same line. |
339 | * So we need an extra line to accommodate it. With large cursors and a single 4k monitor |
340 | * this case triggers corruption. If we're at the edge, then don't trigger display refresh |
341 | * from MALL. We only need to cache the cursor if it is greater than 64x64 at 4 bpp. |
342 | */ |
343 | num_ways++; |
344 | /* We only expect one cursor plane */ |
345 | break; |
346 | } |
347 | } |
348 | } |
349 | if (dc->debug.force_mall_ss_num_ways > 0) { |
350 | num_ways = dc->debug.force_mall_ss_num_ways; |
351 | } |
352 | return num_ways; |
353 | } |
354 | |
355 | bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) |
356 | { |
357 | union dmub_rb_cmd cmd; |
358 | uint8_t ways, i; |
359 | int j; |
360 | bool mall_ss_unsupported = false; |
361 | struct dc_plane_state *plane = NULL; |
362 |  |
363 | if (!dc->ctx->dmub_srv) |
364 | return false; |
365 | |
366 | if (enable) { |
367 | if (dc->current_state) { |
368 | |
369 | /* 1. Check no memory request case for CAB. |
370 | * If no memory request case, send CAB_ACTION NO_DF_REQ DMUB message |
371 | */ |
372 | if (dcn32_check_no_memory_request_for_cab(dc)) { |
373 | /* Enable no-memory-requests case */ |
374 | memset(&cmd, 0, sizeof(cmd)); |
375 | cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS; |
376 | cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ; |
377 | cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); |
378 | |
379 | dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); |
380 | dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); |
381 | |
382 | return true; |
383 | } |
384 | |
385 | /* 2. Check if all surfaces can fit in CAB. |
386 | * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message |
387 | * and configure HUBP's to fetch from MALL |
388 | */ |
389 | ways = dcn32_calculate_cab_allocation(dc, dc->current_state); |
390 | |
391 | /* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo, |
392 | * or TMZ surface, don't try to enter MALL. |
393 | */ |
394 | for (i = 0; i < dc->current_state->stream_count; i++) { |
395 | for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { |
396 | plane = dc->current_state->stream_status[i].plane_states[j]; |
397 | |
398 | if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO || |
399 | plane->address.tmz_surface) { |
400 | mall_ss_unsupported = true; |
401 | break; |
402 | } |
403 | } |
404 | if (mall_ss_unsupported) |
405 | break; |
406 | } |
407 | if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) { |
408 | memset(&cmd, 0, sizeof(cmd)); |
409 | cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS; |
410 | cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB; |
411 | cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); |
412 | cmd.cab.cab_alloc_ways = ways; |
413 | |
414 | dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); |
415 | dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); |
416 | |
417 | return true; |
418 | } |
419 | |
420 | } |
421 | return false; |
422 | } |
423 | |
424 | /* Disable CAB */ |
425 | memset(&cmd, 0, sizeof(cmd)); |
426 | cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS; |
427 | cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION; |
428 | cmd.cab.header.payload_bytes = |
429 | sizeof(cmd.cab) - sizeof(cmd.cab.header); |
430 | |
431 | dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); |
432 | dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); |
433 | dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); |
434 | |
435 | return true; |
436 | } |
437 | |
438 | /* Send DMCUB message with SubVP pipe info |
439 | * - For each pipe in context, populate payload with required SubVP information |
440 | * if the pipe is using SubVP for MCLK switch |
441 | * - This function must be called while the DMUB HW lock is acquired by driver |
442 | */ |
443 | void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context) |
444 | { |
445 | int i; |
446 | bool enable_subvp = false; |
447 | |
448 | if (!dc->ctx || !dc->ctx->dmub_srv) |
449 | return; |
450 | |
451 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
452 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
453 | |
454 | if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream && |
455 | pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { |
456 | // There is at least 1 SubVP pipe, so enable SubVP |
457 | enable_subvp = true; |
458 | break; |
459 | } |
460 | } |
461 | dc_dmub_setup_subvp_dmub_command(dc, context, enable_subvp); |
462 | } |
463 | |
464 | /* Sub-Viewport DMUB lock needs to be acquired by driver whenever SubVP is active and: |
465 | * 1. Any full update for any SubVP main pipe |
466 | * 2. Any immediate flip for any SubVP pipe |
467 | * 3. Any flip for DRR pipe |
468 | * 4. If SubVP was previously in use (i.e. in old context) |
469 | */ |
470 | void dcn32_subvp_pipe_control_lock(struct dc *dc, |
471 | struct dc_state *context, |
472 | bool lock, |
473 | bool should_lock_all_pipes, |
474 | struct pipe_ctx *top_pipe_to_program, |
475 | bool subvp_prev_use) |
476 | { |
477 | unsigned int i = 0; |
478 | bool subvp_immediate_flip = false; |
479 | bool subvp_in_use = false; |
480 | struct pipe_ctx *pipe; |
481 | |
482 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
483 | pipe = &context->res_ctx.pipe_ctx[i]; |
484 | |
485 | if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { |
486 | subvp_in_use = true; |
487 | break; |
488 | } |
489 | } |
490 | |
491 | if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) { |
492 | if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN && |
493 | top_pipe_to_program->plane_state->flip_immediate) |
494 | subvp_immediate_flip = true; |
495 | } |
496 | |
497 | // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. |
498 | if ((subvp_in_use && (should_lock_all_pipes || subvp_immediate_flip)) || (!subvp_in_use && subvp_prev_use)) { |
499 | union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; |
500 | |
501 | if (!lock) { |
502 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
503 | pipe = &context->res_ctx.pipe_ctx[i]; |
504 | if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN && |
505 | should_lock_all_pipes) |
506 | pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); |
507 | } |
508 | } |
509 | |
510 | hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; |
511 | hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; |
512 | hw_lock_cmd.bits.lock = lock; |
513 | hw_lock_cmd.bits.should_release = !lock; |
514 | dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); |
515 | } |
516 | } |
517 | |
518 | |
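/* Program the post-blend shaper LUT and 3D LUT in the MPC for this |
 * stream; the return value reflects the last MPC programming call (or |
 * stays false when no initialized 3D LUT is present). |
 */ |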
519 | static bool dcn32_set_mpc_shaper_3dlut( |
520 | struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) |
521 | { |
522 | struct dpp *dpp_base = pipe_ctx->plane_res.dpp; |
523 | int mpcc_id = pipe_ctx->plane_res.hubp->inst; |
524 | struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; |
525 | bool result = false; |
526 |  |
527 | const struct pwl_params *shaper_lut = NULL; |
528 | //get the shaper lut params |
529 | if (stream->func_shaper) { |
530 | if (stream->func_shaper->type == TF_TYPE_HWPWL) |
531 | shaper_lut = &stream->func_shaper->pwl; |
532 | else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { |
533 | cm_helper_translate_curve_to_hw_format(stream->ctx, |
534 | stream->func_shaper, |
535 | &dpp_base->shaper_params, true); |
536 | shaper_lut = &dpp_base->shaper_params; |
537 | } |
538 | } |
539 | |
540 | if (stream->lut3d_func && |
541 | stream->lut3d_func->state.bits.initialized == 1) { |
542 | |
543 | result = mpc->funcs->program_3dlut(mpc, |
544 | &stream->lut3d_func->lut_3d, |
545 | mpcc_id); |
546 | |
547 | result = mpc->funcs->program_shaper(mpc, |
548 | shaper_lut, |
549 | mpcc_id); |
550 | } |
551 | |
552 | return result; |
553 | } |
554 | |
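/* Program the per-plane MCM LUT chain in the MPC: 1D blend LUT, input |
 * shaper, then 3D LUT. Distributed-points transfer functions are first |
 * translated into the hardware PWL format. |
 */ |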
555 | bool dcn32_set_mcm_luts( |
556 | struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) |
557 | { |
558 | struct dpp *dpp_base = pipe_ctx->plane_res.dpp; |
559 | int mpcc_id = pipe_ctx->plane_res.hubp->inst; |
560 | struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; |
561 | bool result = true; |
562 | struct pwl_params *lut_params = NULL; |
563 | |
564 | // 1D LUT |
565 | if (plane_state->blend_tf) { |
566 | if (plane_state->blend_tf->type == TF_TYPE_HWPWL) |
567 | lut_params = &plane_state->blend_tf->pwl; |
568 | else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { |
569 | cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf, |
570 | &dpp_base->regamma_params, false); |
571 | lut_params = &dpp_base->regamma_params; |
572 | } |
573 | } |
574 | result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); |
575 | |
576 | // Shaper |
577 | if (plane_state->in_shaper_func) { |
578 | if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) |
579 | lut_params = &plane_state->in_shaper_func->pwl; |
580 | else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { |
581 | // TODO: dpp_base replace |
582 | ASSERT(false); |
583 | cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func, |
584 | &dpp_base->shaper_params, true); |
585 | lut_params = &dpp_base->shaper_params; |
586 | } |
587 | } |
588 | |
589 | result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); |
590 | |
591 | // 3D |
592 | if (plane_state->lut3d_func && plane_state->lut3d_func->state.bits.initialized == 1) |
593 | result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func->lut_3d, mpcc_id); |
594 | else |
595 | result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id); |
596 | |
597 | return result; |
598 | } |
599 | |
600 | bool dcn32_set_input_transfer_func(struct dc *dc, |
601 | struct pipe_ctx *pipe_ctx, |
602 | const struct dc_plane_state *plane_state) |
603 | { |
604 | struct dce_hwseq *hws = dc->hwseq; |
605 | struct mpc *mpc = dc->res_pool->mpc; |
606 | struct dpp *dpp_base = pipe_ctx->plane_res.dpp; |
607 | |
608 | enum dc_transfer_func_predefined tf; |
609 | bool result = true; |
610 | struct pwl_params *params = NULL; |
611 |  |
612 | if (mpc == NULL || plane_state == NULL) |
613 | return false; |
614 | |
615 | tf = TRANSFER_FUNCTION_UNITY; |
616 | |
617 | if (plane_state->in_transfer_func && |
618 | plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED) |
619 | tf = plane_state->in_transfer_func->tf; |
620 | |
621 | dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf); |
622 | |
623 | if (plane_state->in_transfer_func) { |
624 | if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL) |
625 | params = &plane_state->in_transfer_func->pwl; |
626 | else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && |
627 | cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func, |
628 | &dpp_base->degamma_params, false)) |
629 | params = &dpp_base->degamma_params; |
630 | } |
631 | |
632 | dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); |
633 | |
634 | if (pipe_ctx->stream_res.opp && |
635 | pipe_ctx->stream_res.opp->ctx && |
636 | hws->funcs.set_mcm_luts) |
637 | result = hws->funcs.set_mcm_luts(pipe_ctx, plane_state); |
638 | |
639 | return result; |
640 | } |
641 | |
642 | bool dcn32_set_output_transfer_func(struct dc *dc, |
643 | struct pipe_ctx *pipe_ctx, |
644 | const struct dc_stream_state *stream) |
645 | { |
646 | int mpcc_id = pipe_ctx->plane_res.hubp->inst; |
647 | struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; |
648 | struct pwl_params *params = NULL; |
649 | bool ret = false; |
650 | |
651 | /* program OGAM or 3DLUT only for the top pipe*/ |
652 | if (pipe_ctx->top_pipe == NULL) { |
653 | /*program shaper and 3dlut in MPC*/ |
654 | ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream); |
655 | if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) { |
656 | if (stream->out_transfer_func->type == TF_TYPE_HWPWL) |
657 | params = &stream->out_transfer_func->pwl; |
658 | else if (pipe_ctx->stream->out_transfer_func->type == |
659 | TF_TYPE_DISTRIBUTED_POINTS && |
660 | cm3_helper_translate_curve_to_hw_format( |
661 | stream->out_transfer_func, |
662 | &mpc->blender_params, false)) |
663 | params = &mpc->blender_params; |
664 | /* there are no ROM LUTs in OUTGAM */ |
665 | if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) |
666 | BREAK_TO_DEBUGGER(); |
667 | } |
668 | } |
669 | |
670 | mpc->funcs->set_output_gamma(mpc, mpcc_id, params); |
671 | return ret; |
672 | } |
673 | |
674 | /* Program P-State force value according to if pipe is using SubVP or not: |
675 | * 1. Reset P-State force on all pipes first |
676 | * 2. For each main pipe, force P-State disallow (P-State allow moderated by DMUB) |
677 | */ |
678 | void dcn32_subvp_update_force_pstate(struct dc *dc, struct dc_state *context) |
679 | { |
680 | int i; |
681 | int num_subvp = 0; |
682 | /* Unforce p-state for each pipe |
683 | */ |
684 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
685 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
686 | struct hubp *hubp = pipe->plane_res.hubp; |
687 | |
688 | if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) |
689 | hubp->funcs->hubp_update_force_pstate_disallow(hubp, false); |
690 | if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) |
691 | num_subvp++; |
692 | } |
693 | |
694 | if (num_subvp == 0) |
695 | return; |
696 | |
697 | /* Loop through each pipe -- for each subvp main pipe force p-state allow equal to false. |
698 | */ |
699 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
700 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
701 | |
702 | // For SubVP + DRR, also force disallow on the DRR pipe |
703 | // (We will force allow in the DMUB sequence -- some DRR timings by default won't allow P-State so we have |
704 | // to force once the vblank is stretched). |
705 | if (pipe->stream && pipe->plane_state && (pipe->stream->mall_stream_config.type == SUBVP_MAIN || |
706 | (pipe->stream->mall_stream_config.type == SUBVP_NONE && pipe->stream->ignore_msa_timing_param))) { |
707 | struct hubp *hubp = pipe->plane_res.hubp; |
708 | |
709 | if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) |
710 | hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); |
711 | } |
712 | } |
713 | } |
714 | |
715 | /* Update MALL_SEL register based on if pipe / plane |
716 | * is a phantom pipe, main pipe, and if using MALL |
717 | * for SS. |
718 | */ |
719 | void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context) |
720 | { |
721 | int i; |
722 | unsigned int num_ways = dcn32_calculate_cab_allocation(dc, context); |
723 | bool cache_cursor = false; |
724 | |
725 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
726 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
727 | struct hubp *hubp = pipe->plane_res.hubp; |
728 | |
729 | if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) { |
730 | int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; |
731 | |
732 | switch (hubp->curs_attr.color_format) { |
733 | case CURSOR_MODE_MONO: |
734 | cursor_size /= 2; |
735 | break; |
736 | case CURSOR_MODE_COLOR_1BIT_AND: |
737 | case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: |
738 | case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: |
739 | cursor_size *= 4; |
740 | break; |
741 | |
742 | case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: |
743 | case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: |
744 | default: |
745 | cursor_size *= 8; |
746 | break; |
747 | } |
748 | |
749 | if (cursor_size > 16384) |
750 | cache_cursor = true; |
751 | |
752 | if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { |
753 | hubp->funcs->hubp_update_mall_sel(hubp, 1, false); |
754 | } else { |
755 | // MALL not supported with Stereo3D |
756 | hubp->funcs->hubp_update_mall_sel(hubp, |
757 | num_ways <= dc->caps.cache_num_ways && |
758 | pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED && |
759 | pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO && |
760 | !pipe->plane_state->address.tmz_surface ? 2 : 0, |
761 | cache_cursor); |
762 | } |
763 | } |
764 | } |
765 | } |
766 | |
767 | /* Program the sub-viewport pipe configuration after the main / phantom pipes |
768 | * have been programmed in hardware. |
769 | * 1. Update force P-State for all the main pipes (disallow P-state) |
770 | * 2. Update MALL_SEL register |
771 | * 3. Program FORCE_ONE_ROW_FOR_FRAME for main subvp pipes |
772 | */ |
773 | void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context) |
774 | { |
775 | int i; |
776 | struct dce_hwseq *hws = dc->hwseq; |
777 | |
778 | // Don't force p-state disallow -- can't block dummy p-state |
779 | |
780 | // Update MALL_SEL register for each pipe |
781 | if (hws && hws->funcs.update_mall_sel) |
782 | hws->funcs.update_mall_sel(dc, context); |
783 | |
784 | // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes |
785 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
786 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
787 | struct hubp *hubp = pipe->plane_res.hubp; |
788 | |
789 | if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) { |
790 | /* TODO - remove setting CURSOR_REQ_MODE to 0 for legacy cases |
791 | * - need to investigate single pipe MPO + SubVP case to |
792 | * see if CURSOR_REQ_MODE will be back to 1 for SubVP |
793 | * when it should be 0 for MPO |
794 | */ |
795 | if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { |
796 | hubp->funcs->hubp_prepare_subvp_buffering(hubp, true); |
797 | } |
798 | } |
799 | } |
800 | } |
801 | |
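/* One-time hardware bring-up: clocks and DCCG first, then per-link PHY |
 * init, DSC power gating, pipe init or power-down depending on boot |
 * mode, audio/ABM init, clock gating, and a DMCUB capability query. |
 */ |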
802 | void dcn32_init_hw(struct dc *dc) |
803 | { |
804 | struct abm **abms = dc->res_pool->multiple_abms; |
805 | struct dce_hwseq *hws = dc->hwseq; |
806 | struct dc_bios *dcb = dc->ctx->dc_bios; |
807 | struct resource_pool *res_pool = dc->res_pool; |
808 | int i; |
809 | int edp_num; |
810 | uint32_t backlight = MAX_BACKLIGHT_LEVEL; |
811 | |
812 | if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) |
813 | dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); |
814 | |
815 | // Initialize the dccg |
816 | if (res_pool->dccg->funcs->dccg_init) |
817 | res_pool->dccg->funcs->dccg_init(res_pool->dccg); |
818 | |
819 | if (!dcb->funcs->is_accelerated_mode(dcb)) { |
820 | hws->funcs.bios_golden_init(dc); |
821 | hws->funcs.disable_vga(dc->hwseq); |
822 | } |
823 | |
824 | // Set default OPTC memory power states |
825 | if (dc->debug.enable_mem_low_power.bits.optc) { |
826 | // Shutdown when unassigned and light sleep in VBLANK |
827 | REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); |
828 | } |
829 | |
830 | if (dc->debug.enable_mem_low_power.bits.vga) { |
831 | // Power down VGA memory |
832 | REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); |
833 | } |
834 | |
835 | if (dc->ctx->dc_bios->fw_info_valid) { |
836 | res_pool->ref_clocks.xtalin_clock_inKhz = |
837 | dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; |
838 | |
839 | if (res_pool->dccg && res_pool->hubbub) { |
840 | (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, |
841 | dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, |
842 | &res_pool->ref_clocks.dccg_ref_clock_inKhz); |
843 | |
844 | (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, |
845 | res_pool->ref_clocks.dccg_ref_clock_inKhz, |
846 | &res_pool->ref_clocks.dchub_ref_clock_inKhz); |
847 | } else { |
848 | // Not all ASICs have DCCG sw component |
849 | res_pool->ref_clocks.dccg_ref_clock_inKhz = |
850 | res_pool->ref_clocks.xtalin_clock_inKhz; |
851 | res_pool->ref_clocks.dchub_ref_clock_inKhz = |
852 | res_pool->ref_clocks.xtalin_clock_inKhz; |
853 | } |
854 | } else |
855 | ASSERT_CRITICAL(false); |
856 | |
857 | for (i = 0; i < dc->link_count; i++) { |
858 | /* Power up AND update implementation according to the |
859 | * required signal (which may be different from the |
860 | * default signal on connector). |
861 | */ |
862 | struct dc_link *link = dc->links[i]; |
863 | |
864 | link->link_enc->funcs->hw_init(link->link_enc); |
865 | |
866 | /* Check for enabled DIG to identify enabled display */ |
867 | if (link->link_enc->funcs->is_dig_enabled && |
868 | link->link_enc->funcs->is_dig_enabled(link->link_enc)) { |
869 | link->link_status.link_active = true; |
870 | link->phy_state.symclk_state = SYMCLK_ON_TX_ON; |
871 | if (link->link_enc->funcs->fec_is_active && |
872 | link->link_enc->funcs->fec_is_active(link->link_enc)) |
873 | link->fec_state = dc_link_fec_enabled; |
874 | } |
875 | } |
876 | |
877 | /* Power gate DSCs */ |
878 | for (i = 0; i < res_pool->res_cap->num_dsc; i++) |
879 | if (hws->funcs.dsc_pg_control != NULL) |
880 | hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); |
881 | |
882 | /* we want to turn off all dp displays before doing detection */ |
883 | dc_link_blank_all_dp_displays(dc); |
884 | |
885 | /* If taking control over from VBIOS, we may want to optimize our first |
886 | * mode set, so we need to skip powering down pipes until we know which |
887 | * pipes we want to use. |
888 | * Otherwise, if taking control is not possible, we need to power |
889 | * everything down. |
890 | */ |
891 | if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { |
892 | hws->funcs.init_pipes(dc, dc->current_state); |
893 | if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) |
894 | dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, |
895 | !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); |
896 | } |
897 | |
898 | /* In headless boot cases, DIG may be turned |
899 | * on which causes HW/SW discrepancies. |
900 | * To avoid this, power down hardware on boot |
901 | * if DIG is turned on and seamless boot not enabled |
902 | */ |
903 | if (!dc->config.seamless_boot_edp_requested) { |
904 | struct dc_link *edp_links[MAX_NUM_EDP]; |
905 | struct dc_link *edp_link; |
906 | |
907 | get_edp_links(dc, edp_links, &edp_num); |
908 | if (edp_num) { |
909 | for (i = 0; i < edp_num; i++) { |
910 | edp_link = edp_links[i]; |
911 | if (edp_link->link_enc->funcs->is_dig_enabled && |
912 | edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && |
913 | dc->hwss.edp_backlight_control && |
914 | dc->hwss.power_down && |
915 | dc->hwss.edp_power_control) { |
916 | dc->hwss.edp_backlight_control(edp_link, false); |
917 | dc->hwss.power_down(dc); |
918 | dc->hwss.edp_power_control(edp_link, false); |
919 | } |
920 | } |
921 | } else { |
922 | for (i = 0; i < dc->link_count; i++) { |
923 | struct dc_link *link = dc->links[i]; |
924 | |
925 | if (link->link_enc->funcs->is_dig_enabled && |
926 | link->link_enc->funcs->is_dig_enabled(link->link_enc) && |
927 | dc->hwss.power_down) { |
928 | dc->hwss.power_down(dc); |
929 | break; |
930 | } |
931 | |
932 | } |
933 | } |
934 | } |
935 | |
936 | for (i = 0; i < res_pool->audio_count; i++) { |
937 | struct audio *audio = res_pool->audios[i]; |
938 | |
939 | audio->funcs->hw_init(audio); |
940 | } |
941 | |
942 | for (i = 0; i < dc->link_count; i++) { |
943 | struct dc_link *link = dc->links[i]; |
944 | |
945 | if (link->panel_cntl) |
946 | backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); |
947 | } |
948 | |
949 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
950 | if (abms[i] != NULL && abms[i]->funcs != NULL) |
951 | abms[i]->funcs->abm_init(abms[i], backlight); |
952 | } |
953 | |
954 | /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ |
955 | REG_WRITE(DIO_MEM_PWR_CTRL, 0); |
956 | |
957 | if (!dc->debug.disable_clock_gate) { |
958 | /* enable all DCN clock gating */ |
959 | REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); |
960 | |
961 | REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); |
962 | |
963 | REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); |
964 | } |
965 | if (hws->funcs.enable_power_gating_plane) |
966 | hws->funcs.enable_power_gating_plane(dc->hwseq, true); |
967 | |
968 | if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) |
969 | dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); |
970 | |
971 | if (dc->clk_mgr->funcs->notify_wm_ranges) |
972 | dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); |
973 | |
974 | if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) |
975 | dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); |
976 | |
977 | if (dc->res_pool->hubbub->funcs->force_pstate_change_control) |
978 | dc->res_pool->hubbub->funcs->force_pstate_change_control( |
979 | dc->res_pool->hubbub, false, false); |
980 | |
981 | if (dc->res_pool->hubbub->funcs->init_crb) |
982 | dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); |
983 | |
984 | // Get DMCUB capabilities |
985 | if (dc->ctx->dmub_srv) { |
986 | dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); |
987 | dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; |
988 | dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; |
989 | } |
990 | |
991 | /* Enable support for ODM and windowed MPO if policy flag is set */ |
992 | if (dc->debug.enable_single_display_2to1_odm_policy) |
993 | dc->config.enable_windowed_mpo_odm = true; |
994 | } |
995 | |
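/* The flow control count is the horizontal blanking width (h_total |
 * minus addressable width and borders), halved once for two pixels per |
 * container or ODM combine, and halved again for ODM 4:1. |
 */ |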
996 | static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, |
997 | int opp_cnt) |
998 | { |
999 | bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing); |
1000 | int flow_ctrl_cnt; |
1001 | |
1002 | if (opp_cnt >= 2) |
1003 | hblank_halved = true; |
1004 | |
1005 | flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable - |
1006 | stream->timing.h_border_left - |
1007 | stream->timing.h_border_right; |
1008 | |
1009 | if (hblank_halved) |
1010 | flow_ctrl_cnt /= 2; |
1011 | |
1012 | /* ODM combine 4:1 case */ |
1013 | if (opp_cnt == 4) |
1014 | flow_ctrl_cnt /= 2; |
1015 | |
1016 | return flow_ctrl_cnt; |
1017 | } |
1018 | |
1019 | static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) |
1020 | { |
1021 | struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; |
1022 | struct dc_stream_state *stream = pipe_ctx->stream; |
1023 | struct pipe_ctx *odm_pipe; |
1024 | int opp_cnt = 1; |
1025 | |
1026 | ASSERT(dsc); |
1027 | for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) |
1028 | opp_cnt++; |
1029 | |
1030 | if (enable) { |
1031 | struct dsc_config dsc_cfg; |
1032 | struct dsc_optc_config dsc_optc_cfg; |
1033 | enum optc_dsc_mode optc_dsc_mode; |
1034 | |
1035 | /* Enable DSC hw block */ |
1036 | dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; |
1037 | dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; |
1038 | dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; |
1039 | dsc_cfg.color_depth = stream->timing.display_color_depth; |
1040 | dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; |
1041 | dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; |
1042 | ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); |
1043 | dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; |
1044 | |
1045 | dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); |
1046 | dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); |
1047 | for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { |
1048 | struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; |
1049 | |
1050 | ASSERT(odm_dsc); |
1051 | odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); |
1052 | odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); |
1053 | } |
1054 | dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; |
1055 | dsc_cfg.pic_width *= opp_cnt; |
1056 | |
1057 | optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; |
1058 | |
1059 | /* Enable DSC in OPTC */ |
1060 | DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst)___drm_dbg(((void *)0), DRM_UT_KMS, "Setting optc DSC config for tg instance %d:" , pipe_ctx->stream_res.tg->inst); |
1061 | pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg, |
1062 | optc_dsc_mode, |
1063 | dsc_optc_cfg.bytes_per_pixel, |
1064 | dsc_optc_cfg.slice_width); |
1065 | } else { |
1066 | /* disable DSC in OPTC */ |
1067 | pipe_ctx->stream_res.tg->funcs->set_dsc_config( |
1068 | pipe_ctx->stream_res.tg, |
1069 | OPTC_DSC_DISABLED, 0, 0); |
1070 | |
1071 | /* disable DSC block */ |
1072 | dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); |
1073 | for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { |
1074 | ASSERT(odm_pipe->stream_res.dsc); |
1075 | odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); |
1076 | } |
1077 | } |
1078 | } |
1079 | |
1080 | /* |
1081 | * Given any pipe_ctx, return the total ODM combine factor, and optionally return |
1082 | * the OPPids which are used |
1083 | * */ |
1084 | static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_instances) |
1085 | { |
1086 | unsigned int opp_count = 1; |
1087 | struct pipe_ctx *odm_pipe; |
1088 | |
1089 | /* First get to the top pipe */ |
1090 | for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe) |
1091 | ; |
1092 | |
1093 | /* First pipe is always used */ |
1094 | if (opp_instances) |
1095 | opp_instances[0] = odm_pipe->stream_res.opp->inst; |
1096 | |
1097 | /* Find and count odm pipes, if any */ |
1098 | for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { |
1099 | if (opp_instances) |
1100 | opp_instances[opp_count] = odm_pipe->stream_res.opp->inst; |
1101 | opp_count++; |
1102 | } |
1103 | |
1104 | return opp_count; |
1105 | } |
1106 | |
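/* Reprogram OPTC ODM combine (or bypass), MPC out-rate flow control and |
 * OPP pixel clock gating for the current ODM layout, then update or |
 * disconnect DSC to match. |
 */ |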
1107 | void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) |
1108 | { |
1109 | struct pipe_ctx *odm_pipe; |
1110 | int opp_cnt = 0; |
1111 | int opp_inst[MAX_PIPES] = {0}; |
1112 | bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing)); |
1113 | struct mpc_dwb_flow_control flow_control; |
1114 | struct mpc *mpc = dc->res_pool->mpc; |
1115 | int i; |
1116 | |
1117 | opp_cnt = get_odm_config(pipe_ctx, opp_inst); |
1118 | |
1119 | if (opp_cnt > 1) |
1120 | pipe_ctx->stream_res.tg->funcs->set_odm_combine( |
1121 | pipe_ctx->stream_res.tg, |
1122 | opp_inst, opp_cnt, |
1123 | &pipe_ctx->stream->timing); |
1124 | else |
1125 | pipe_ctx->stream_res.tg->funcs->set_odm_bypass( |
1126 | pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); |
1127 | |
1128 | rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1; |
1129 | flow_control.flow_ctrl_mode = 0; |
1130 | flow_control.flow_ctrl_cnt0 = 0x80; |
1131 | flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt); |
1132 | if (mpc->funcs->set_out_rate_control) { |
1133 | for (i = 0; i < opp_cnt; ++i) { |
1134 | mpc->funcs->set_out_rate_control( |
1135 | mpc, opp_inst[i], |
1136 | true, |
1137 | rate_control_2x_pclk, |
1138 | &flow_control); |
1139 | } |
1140 | } |
1141 | |
1142 | for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { |
1143 | odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( |
1144 | odm_pipe->stream_res.opp, |
1145 | true); |
1146 | } |
1147 | |
1148 | if (pipe_ctx->stream_res.dsc) { |
1149 | struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; |
1150 | |
1151 | update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC); |
1152 | |
1153 | /* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */ |
1154 | if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe && |
1155 | current_pipe_ctx->next_odm_pipe->stream_res.dsc) { |
1156 | struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc; |
1157 | /* disconnect DSC block from stream */ |
1158 | dsc->funcs->dsc_disconnect(dsc); |
1159 | } |
1160 | } |
1161 | } |
1162 | |
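/* Select the DCCG K1/K2 pixel-rate dividers by signal type: 128b/132b |
 * DP uses divide-by-1 for both; TMDS/DVI divides K2 by 2 for YCbCr |
 * 4:2:0 and by 4 otherwise; other DP divides K2 by 2 when two pixels |
 * per container, ODM 2:1 or the DIG divider policy applies, else by 4. |
 * Returns the ODM combine factor. |
 */ |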
1163 | unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div) |
1164 | { |
1165 | struct dc_stream_state *stream = pipe_ctx->stream; |
1166 | unsigned int odm_combine_factor = 0; |
1167 | bool two_pix_per_container = false; |
1168 | |
1169 | two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing); |
1170 | odm_combine_factor = get_odm_config(pipe_ctx, NULL); |
1171 | |
1172 | if (is_dp_128b_132b_signal(pipe_ctx)) { |
1173 | *k1_div = PIXEL_RATE_DIV_BY_1; |
1174 | *k2_div = PIXEL_RATE_DIV_BY_1; |
1175 | } else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) { |
1176 | *k1_div = PIXEL_RATE_DIV_BY_1; |
1177 | if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) |
1178 | *k2_div = PIXEL_RATE_DIV_BY_2; |
1179 | else |
1180 | *k2_div = PIXEL_RATE_DIV_BY_4; |
1181 | } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { |
1182 | if (two_pix_per_container) { |
1183 | *k1_div = PIXEL_RATE_DIV_BY_1; |
1184 | *k2_div = PIXEL_RATE_DIV_BY_2; |
1185 | } else { |
1186 | *k1_div = PIXEL_RATE_DIV_BY_1; |
1187 | *k2_div = PIXEL_RATE_DIV_BY_4; |
1188 | if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) |
1189 | *k2_div = PIXEL_RATE_DIV_BY_2; |
1190 | } |
1191 | } |
1192 | |
1193 | if ((*k1_div == PIXEL_RATE_DIV_NA) && (*k2_div == PIXEL_RATE_DIV_NA)) |
1194 | ASSERT(false); |
1195 | |
1196 | return odm_combine_factor; |
1197 | } |
1198 | |
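/* The stream encoder input runs at two pixels per cycle whenever the |
 * timing packs two pixels per container, ODM combine is active, or the |
 * DP DIG pixel-rate divider policy applies; otherwise one. |
 */ |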
1199 | void dcn32_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx) |
1200 | { |
1201 | uint32_t pix_per_cycle = 1; |
1202 | uint32_t odm_combine_factor = 1; |
1203 | |
1204 | if (!pipe_ctx || !pipe_ctx->stream || !pipe_ctx->stream_res.stream_enc) |
1205 | return; |
1206 | |
1207 | odm_combine_factor = get_odm_config(pipe_ctx, NULL); |
1208 | if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1 |
1209 | || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) |
1210 | pix_per_cycle = 2; |
1211 | |
1212 | if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode) |
1213 | pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc, |
1214 | pix_per_cycle); |
1215 | } |
1216 | |
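/*
 * Unblank the stream on its encoder. For DP 8b/10b signals the pixel clock
 * passed to the encoder is halved when the encoder runs at two pixels per
 * cycle; for eDP sinks the panel backlight is turned back on once the
 * stream is unblanked.
 */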
void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = {0};
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;
	struct pipe_ctx *odm_pipe;
	uint32_t pix_per_cycle = 1;

	params.opp_cnt = 1;
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		params.opp_cnt++;

	/* only 3 items below are used by unblank */
	params.timing = pipe_ctx->stream->timing;

	params.link_settings.link_rate = link_settings->link_rate;

	if (is_dp_128b_132b_signal(pipe_ctx)) {
		/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
				pipe_ctx->stream_res.hpo_dp_stream_enc,
				pipe_ctx->stream_res.tg->inst);
	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1
			|| dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) {
			params.timing.pix_clk_100hz /= 2;
			pix_per_cycle = 2;
		}
		pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
				pipe_ctx->stream_res.stream_enc, pix_per_cycle > 1);
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
		hws->funcs.edp_backlight_control(link, true);
}

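/*
 * Report whether the DP DIG pixel rate divider policy applies to this pipe:
 * the horizontal timing must be divisible by two and the signal must be DP
 * 8b/10b (not 128b/132b), with the policy enabled in the debug options.
 */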
bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{
	struct dc *dc = pipe_ctx->stream->ctx->dc;

	if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
		return false;

	if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
		dc->debug.enable_dp_dig_pixel_rate_div_policy)
		return true;
	return false;
}

static void apply_symclk_on_tx_off_wa(struct dc_link *link)
{
	/* There are use cases where SYMCLK is referenced by OTG. For instance,
	 * for a TMDS signal, OTG relies on SYMCLK even if the TX video output
	 * is off. However, the current link interface will power off the PHY
	 * when disabling link output, which turns off the SYMCLK generated by
	 * the PHY. The workaround is to identify the cases where SYMCLK is
	 * still in use by OTG when we power off the PHY. When this is detected,
	 * we temporarily power the PHY back on and move the PHY's SYMCLK state
	 * to SYMCLK_ON_TX_OFF by calling the program_pix_clk interface. When
	 * OTG is disabled, we then power off the PHY by calling disable link
	 * output again.
	 *
	 * In future DCN generations, we plan to rework the transmitter control
	 * interface so that we have an option to set the SYMCLK ON TX OFF
	 * state in one step without this workaround.
	 */

	struct dc *dc = link->ctx->dc;
	struct pipe_ctx *pipe_ctx = NULL;
	uint8_t i;

	if (link->phy_state.symclk_ref_cnts.otg > 0) {
		for (i = 0; i < MAX_PIPES; i++) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
				pipe_ctx->clock_source->funcs->program_pix_clk(
						pipe_ctx->clock_source,
						&pipe_ctx->stream_res.pix_clk_params,
						dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
						&pipe_ctx->pll_settings);
				link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
				break;
			}
		}
	}
}

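/*
 * Disable the link output. For eDP the backlight is switched off before the
 * output is disabled and panel power afterwards; for other signals the PHY
 * is locked and later unlocked through DMCU around the disable. Finally the
 * SYMCLK-on-TX-off workaround is applied in case OTG still references
 * SYMCLK.
 */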
void dcn32_disable_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum amd_signal_type signal)
{
	struct dc *dc = link->ctx->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control)
		link->dc->hwss.edp_backlight_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->lock_phy(dmcu);

	link_hwss->disable_link_output(link, link_res, signal);
	link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;

	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control)
		link->dc->hwss.edp_power_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->unlock_phy(dmcu);

	dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);

	apply_symclk_on_tx_off_wa(link);
}

/* For SubVP the main pipe can have a viewport position change
 * without a full update. In this case we must also update the
 * viewport positions for the phantom pipe accordingly.
 */
void dcn32_update_phantom_vp_position(struct dc *dc,
		struct dc_state *context,
		struct pipe_ctx *phantom_pipe)
{
	uint32_t i;
	struct dc_plane_state *phantom_plane = phantom_pipe->plane_state;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
				pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) {
			if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) {

				phantom_plane->src_rect.x = pipe->plane_state->src_rect.x;
				phantom_plane->src_rect.y = pipe->plane_state->src_rect.y;
				phantom_plane->clip_rect.x = pipe->plane_state->clip_rect.x;
				phantom_plane->dst_rect.x = pipe->plane_state->dst_rect.x;
				phantom_plane->dst_rect.y = pipe->plane_state->dst_rect.y;

				phantom_pipe->plane_state->update_flags.bits.position_change = 1;
				resource_build_scaling_params(phantom_pipe);
				return;
			}
		}
	}
}

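/*
 * Return true when the given DSC instance is ungated (powered on), i.e. its
 * PGFSM power status reads back as zero.
 */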
bool dcn32_dsc_pg_status(
		struct dce_hwseq *hws,
		unsigned int dsc_inst)
{
	uint32_t pwr_status = 0;

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_GET(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 1: /* DSC1 */
		REG_GET(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 2: /* DSC2 */
		REG_GET(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 3: /* DSC3 */
		REG_GET(DOMAIN19_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	return pwr_status == 0;
}

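/*
 * Reconcile DSC power gating with the new context: ungate any DSC that the
 * context has acquired but is still gated, and, when safe_to_disable is
 * set, gate any DSC that is ungated but no longer acquired.
 */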
void dcn32_update_dsc_pg(struct dc *dc,
		struct dc_state *context,
		bool safe_to_disable)
{
	struct dce_hwseq *hws = dc->hwseq;
	int i;

	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
		bool is_dsc_ungated = hws->funcs.dsc_pg_status(hws, dsc->inst);

		if (context->res_ctx.is_dsc_acquired[i]) {
			if (!is_dsc_ungated) {
				hws->funcs.dsc_pg_control(hws, dsc->inst, true);
			}
		} else if (safe_to_disable) {
			if (is_dsc_ungated) {
				hws->funcs.dsc_pg_control(hws, dsc->inst, false);
			}
		}
	}
}