| File: | dev/pci/drm/amd/display/dc/dcn20/dcn20_hwseq.c |
| Warning: | line 878, column 2 Value stored to 'result' is never read |
| 1 | /* |
| 2 | * Copyright 2016 Advanced Micro Devices, Inc. |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | * |
| 22 | * Authors: AMD |
| 23 | * |
| 24 | */ |
| 25 | #include <linux/delay.h> |
| 26 | |
| 27 | #include "dm_services.h" |
| 28 | #include "basics/dc_common.h" |
| 29 | #include "dm_helpers.h" |
| 30 | #include "core_types.h" |
| 31 | #include "resource.h" |
| 32 | #include "dcn20_resource.h" |
| 33 | #include "dcn20_hwseq.h" |
| 34 | #include "dce/dce_hwseq.h" |
| 35 | #include "dcn20_dsc.h" |
| 36 | #include "dcn20_optc.h" |
| 37 | #include "abm.h" |
| 38 | #include "clk_mgr.h" |
| 39 | #include "dmcu.h" |
| 40 | #include "hubp.h" |
| 41 | #include "timing_generator.h" |
| 42 | #include "opp.h" |
| 43 | #include "ipp.h" |
| 44 | #include "mpc.h" |
| 45 | #include "mcif_wb.h" |
| 46 | #include "dchubbub.h" |
| 47 | #include "reg_helper.h" |
| 48 | #include "dcn10/dcn10_cm_common.h" |
| 49 | #include "dc_link_dp.h" |
| 50 | #include "vm_helper.h" |
| 51 | #include "dccg.h" |
| 52 | #include "dc_dmub_srv.h" |
| 53 | #include "dce/dmub_hw_lock_mgr.h" |
| 54 | |
| 55 | #define DC_LOGGER_INIT(logger) |
| 56 | |
| 57 | #define CTX \ |
| 58 | hws->ctx |
| 59 | #define REG(reg)\ |
| 60 | hws->regs->reg |
| 61 | |
| 62 | #undef FN |
| 63 | #define FN(reg_name, field_name) \ |
| 64 | hws->shifts->field_name, hws->masks->field_name |
| 65 | |
| 66 | static int find_free_gsl_group(const struct dc *dc) |
| 67 | { |
| 68 | if (dc->res_pool->gsl_groups.gsl_0 == 0) |
| 69 | return 1; |
| 70 | if (dc->res_pool->gsl_groups.gsl_1 == 0) |
| 71 | return 2; |
| 72 | if (dc->res_pool->gsl_groups.gsl_2 == 0) |
| 73 | return 3; |
| 74 | |
| 75 | return 0; |
| 76 | } |
| 77 | |
| 78 | /* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) |
| 79 | * This is only used to lock pipes in pipe splitting case with immediate flip |
| 80 | * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate, |
| 81 | * so we get tearing with freesync since we cannot flip multiple pipes |
| 82 | * atomically. |
| 83 | * We use GSL for this: |
| 84 | * - immediate flip: find first available GSL group if not already assigned |
| 85 | * program gsl with that group, set current OTG as master |
| 86 | * and always use 0x4 = AND of flip_ready from all pipes |
| 87 | * - vsync flip: disable GSL if used |
| 88 | * |
| 89 | * Groups in stream_res are stored as +1 from HW registers, i.e. |
| 90 | * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 |
| 91 | * Using a magic value like -1 would require tracking all inits/resets |
| 92 | */ |
| 93 | static void dcn20_setup_gsl_group_as_lock( |
| 94 | const struct dc *dc, |
| 95 | struct pipe_ctx *pipe_ctx, |
| 96 | bool_Bool enable) |
| 97 | { |
| 98 | struct gsl_params gsl; |
| 99 | int group_idx; |
| 100 | |
| 101 | memset(&gsl, 0, sizeof(struct gsl_params))__builtin_memset((&gsl), (0), (sizeof(struct gsl_params)) ); |
| 102 | |
| 103 | if (enable) { |
| 104 | /* return if group already assigned since GSL was set up |
| 105 | * for vsync flip, we would unassign so it can't be "left over" |
| 106 | */ |
| 107 | if (pipe_ctx->stream_res.gsl_group > 0) |
| 108 | return; |
| 109 | |
| 110 | group_idx = find_free_gsl_group(dc); |
| 111 | ASSERT(group_idx != 0)do { if (({ static int __warned; int __ret = !!(!(group_idx != 0)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n" , "!(group_idx != 0)", "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn20/dcn20_hwseq.c" , 111); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
| 112 | pipe_ctx->stream_res.gsl_group = group_idx; |
| 113 | |
| 114 | /* set gsl group reg field and mark resource used */ |
| 115 | switch (group_idx) { |
| 116 | case 1: |
| 117 | gsl.gsl0_en = 1; |
| 118 | dc->res_pool->gsl_groups.gsl_0 = 1; |
| 119 | break; |
| 120 | case 2: |
| 121 | gsl.gsl1_en = 1; |
| 122 | dc->res_pool->gsl_groups.gsl_1 = 1; |
| 123 | break; |
| 124 | case 3: |
| 125 | gsl.gsl2_en = 1; |
| 126 | dc->res_pool->gsl_groups.gsl_2 = 1; |
| 127 | break; |
| 128 | default: |
| 129 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 129); do {} while (0); } while (0); |
| 130 | return; // invalid case |
| 131 | } |
| 132 | gsl.gsl_master_en = 1; |
| 133 | } else { |
| 134 | group_idx = pipe_ctx->stream_res.gsl_group; |
| 135 | if (group_idx == 0) |
| 136 | return; // if not in use, just return |
| 137 | |
| 138 | pipe_ctx->stream_res.gsl_group = 0; |
| 139 | |
| 140 | /* unset gsl group reg field and mark resource free */ |
| 141 | switch (group_idx) { |
| 142 | case 1: |
| 143 | gsl.gsl0_en = 0; |
| 144 | dc->res_pool->gsl_groups.gsl_0 = 0; |
| 145 | break; |
| 146 | case 2: |
| 147 | gsl.gsl1_en = 0; |
| 148 | dc->res_pool->gsl_groups.gsl_1 = 0; |
| 149 | break; |
| 150 | case 3: |
| 151 | gsl.gsl2_en = 0; |
| 152 | dc->res_pool->gsl_groups.gsl_2 = 0; |
| 153 | break; |
| 154 | default: |
| 155 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 155); do {} while (0); } while (0); |
| 156 | return; |
| 157 | } |
| 158 | gsl.gsl_master_en = 0; |
| 159 | } |
| 160 | |
| 161 | /* at this point we want to program whether it's to enable or disable */ |
| 162 | if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL((void *)0) && |
| 163 | pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL((void *)0)) { |
| 164 | pipe_ctx->stream_res.tg->funcs->set_gsl( |
| 165 | pipe_ctx->stream_res.tg, |
| 166 | &gsl); |
| 167 | |
| 168 | pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( |
| 169 | pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); |
| 170 | } else |
| 171 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 171); do {} while (0); } while (0); |
| 172 | } |
| 173 | |
| 174 | void dcn20_set_flip_control_gsl( |
| 175 | struct pipe_ctx *pipe_ctx, |
| 176 | bool_Bool flip_immediate) |
| 177 | { |
| 178 | if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) |
| 179 | pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( |
| 180 | pipe_ctx->plane_res.hubp, flip_immediate); |
| 181 | |
| 182 | } |
| 183 | |
| 184 | void dcn20_enable_power_gating_plane( |
| 185 | struct dce_hwseq *hws, |
| 186 | bool_Bool enable) |
| 187 | { |
| 188 | bool_Bool force_on = true1; /* disable power gating */ |
| 189 | |
| 190 | if (enable) |
| 191 | force_on = false0; |
| 192 | |
| 193 | /* DCHUBP0/1/2/3/4/5 */ |
| 194 | REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); |
| 195 | REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on); |
| 196 | REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on); |
| 197 | REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on); |
| 198 | if (REG(DOMAIN8_PG_CONFIG)) |
| 199 | REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on); |
| 200 | if (REG(DOMAIN10_PG_CONFIG)) |
| 201 | REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on); |
| 202 | |
| 203 | /* DPP0/1/2/3/4/5 */ |
| 204 | REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on); |
| 205 | REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on); |
| 206 | REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on); |
| 207 | REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on); |
| 208 | if (REG(DOMAIN9_PG_CONFIG)) |
| 209 | REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on); |
| 210 | if (REG(DOMAIN11_PG_CONFIG)) |
| 211 | REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on); |
| 212 | |
| 213 | /* DCS0/1/2/3/4/5 */ |
| 214 | REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on); |
| 215 | REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on); |
| 216 | REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on); |
| 217 | if (REG(DOMAIN19_PG_CONFIG)) |
| 218 | REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on); |
| 219 | if (REG(DOMAIN20_PG_CONFIG)) |
| 220 | REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on); |
| 221 | if (REG(DOMAIN21_PG_CONFIG)) |
| 222 | REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on); |
| 223 | } |
| 224 | |
| 225 | void dcn20_dccg_init(struct dce_hwseq *hws) |
| 226 | { |
| 227 | /* |
| 228 | * set MICROSECOND_TIME_BASE_DIV |
| 229 | * 100Mhz refclk -> 0x120264 |
| 230 | * 27Mhz refclk -> 0x12021b |
| 231 | * 48Mhz refclk -> 0x120230 |
| 232 | * |
| 233 | */ |
| 234 | REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264); |
| 235 | |
| 236 | /* |
| 237 | * set MILLISECOND_TIME_BASE_DIV |
| 238 | * 100Mhz refclk -> 0x1186a0 |
| 239 | * 27Mhz refclk -> 0x106978 |
| 240 | * 48Mhz refclk -> 0x10bb80 |
| 241 | * |
| 242 | */ |
| 243 | REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0); |
| 244 | |
| 245 | /* This value is dependent on the hardware pipeline delay so set once per SOC */ |
| 246 | REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c); |
| 247 | } |
| 248 | |
| 249 | void dcn20_disable_vga( |
| 250 | struct dce_hwseq *hws) |
| 251 | { |
| 252 | REG_WRITE(D1VGA_CONTROL, 0); |
| 253 | REG_WRITE(D2VGA_CONTROL, 0); |
| 254 | REG_WRITE(D3VGA_CONTROL, 0); |
| 255 | REG_WRITE(D4VGA_CONTROL, 0); |
| 256 | REG_WRITE(D5VGA_CONTROL, 0); |
| 257 | REG_WRITE(D6VGA_CONTROL, 0); |
| 258 | } |
| 259 | |
| 260 | void dcn20_program_triple_buffer( |
| 261 | const struct dc *dc, |
| 262 | struct pipe_ctx *pipe_ctx, |
| 263 | bool_Bool enable_triple_buffer) |
| 264 | { |
| 265 | if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) { |
| 266 | pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer( |
| 267 | pipe_ctx->plane_res.hubp, |
| 268 | enable_triple_buffer); |
| 269 | } |
| 270 | } |
| 271 | |
| 272 | /* Blank pixel data during initialization */ |
| 273 | void dcn20_init_blank( |
| 274 | struct dc *dc, |
| 275 | struct timing_generator *tg) |
| 276 | { |
| 277 | struct dce_hwseq *hws = dc->hwseq; |
| 278 | enum dc_color_space color_space; |
| 279 | struct tg_color black_color = {0}; |
| 280 | struct output_pixel_processor *opp = NULL((void *)0); |
| 281 | struct output_pixel_processor *bottom_opp = NULL((void *)0); |
| 282 | uint32_t num_opps, opp_id_src0, opp_id_src1; |
| 283 | uint32_t otg_active_width, otg_active_height; |
| 284 | |
| 285 | /* program opp dpg blank color */ |
| 286 | color_space = COLOR_SPACE_SRGB; |
| 287 | color_space_to_black_color(dc, color_space, &black_color); |
| 288 | |
| 289 | /* get the OTG active size */ |
| 290 | tg->funcs->get_otg_active_size(tg, |
| 291 | &otg_active_width, |
| 292 | &otg_active_height); |
| 293 | |
| 294 | /* get the OPTC source */ |
| 295 | tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); |
| 296 | |
| 297 | if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) { |
| 298 | ASSERT(false)do { if (({ static int __warned; int __ret = !!(!(0)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n" , "!(0)", "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn20/dcn20_hwseq.c" , 298); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
| 299 | return; |
| 300 | } |
| 301 | opp = dc->res_pool->opps[opp_id_src0]; |
| 302 | |
| 303 | if (num_opps == 2) { |
| 304 | otg_active_width = otg_active_width / 2; |
| 305 | |
| 306 | if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) { |
| 307 | ASSERT(false)do { if (({ static int __warned; int __ret = !!(!(0)); if (__ret && !__warned) { printf("WARNING %s failed at %s:%d\n" , "!(0)", "/usr/src/sys/dev/pci/drm/amd/display/dc/dcn20/dcn20_hwseq.c" , 307); __warned = 1; } __builtin_expect(!!(__ret), 0); })) do {} while (0); } while (0); |
| 308 | return; |
| 309 | } |
| 310 | bottom_opp = dc->res_pool->opps[opp_id_src1]; |
| 311 | } |
| 312 | |
| 313 | opp->funcs->opp_set_disp_pattern_generator( |
| 314 | opp, |
| 315 | CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, |
| 316 | CONTROLLER_DP_COLOR_SPACE_UDEFINED, |
| 317 | COLOR_DEPTH_UNDEFINED, |
| 318 | &black_color, |
| 319 | otg_active_width, |
| 320 | otg_active_height, |
| 321 | 0); |
| 322 | |
| 323 | if (num_opps == 2) { |
| 324 | bottom_opp->funcs->opp_set_disp_pattern_generator( |
| 325 | bottom_opp, |
| 326 | CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, |
| 327 | CONTROLLER_DP_COLOR_SPACE_UDEFINED, |
| 328 | COLOR_DEPTH_UNDEFINED, |
| 329 | &black_color, |
| 330 | otg_active_width, |
| 331 | otg_active_height, |
| 332 | 0); |
| 333 | } |
| 334 | |
| 335 | hws->funcs.wait_for_blank_complete(opp); |
| 336 | } |
| 337 | |
| 338 | void dcn20_dsc_pg_control( |
| 339 | struct dce_hwseq *hws, |
| 340 | unsigned int dsc_inst, |
| 341 | bool_Bool power_on) |
| 342 | { |
| 343 | uint32_t power_gate = power_on ? 0 : 1; |
| 344 | uint32_t pwr_status = power_on ? 0 : 2; |
| 345 | uint32_t org_ip_request_cntl = 0; |
| 346 | |
| 347 | if (hws->ctx->dc->debug.disable_dsc_power_gate) |
| 348 | return; |
| 349 | |
| 350 | if (REG(DOMAIN16_PG_CONFIG)hws->regs->DOMAIN16_PG_CONFIG == 0) |
| 351 | return; |
| 352 | |
| 353 | REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl)generic_reg_get(hws->ctx, hws->regs->DC_IP_REQUEST_CNTL , hws->shifts->IP_REQUEST_EN, hws->masks->IP_REQUEST_EN , &org_ip_request_cntl); |
| 354 | if (org_ip_request_cntl == 0) |
| 355 | REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1)generic_reg_set_ex(hws->ctx, hws->regs->DC_IP_REQUEST_CNTL , 0, 1, hws->shifts->IP_REQUEST_EN, hws->masks->IP_REQUEST_EN , 1); |
| 356 | |
| 357 | switch (dsc_inst) { |
| 358 | case 0: /* DSC0 */ |
| 359 | REG_UPDATE(DOMAIN16_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN16_PG_CONFIG , 1, hws->shifts->DOMAIN16_POWER_GATE, hws->masks-> DOMAIN16_POWER_GATE, power_gate) |
| 360 | DOMAIN16_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN16_PG_CONFIG , 1, hws->shifts->DOMAIN16_POWER_GATE, hws->masks-> DOMAIN16_POWER_GATE, power_gate); |
| 361 | |
| 362 | REG_WAIT(DOMAIN16_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN16_PG_STATUS , hws->shifts->DOMAIN16_PGFSM_PWR_STATUS, hws->masks ->DOMAIN16_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 364) |
| 363 | DOMAIN16_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN16_PG_STATUS , hws->shifts->DOMAIN16_PGFSM_PWR_STATUS, hws->masks ->DOMAIN16_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 364) |
| 364 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN16_PG_STATUS , hws->shifts->DOMAIN16_PGFSM_PWR_STATUS, hws->masks ->DOMAIN16_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 364); |
| 365 | break; |
| 366 | case 1: /* DSC1 */ |
| 367 | REG_UPDATE(DOMAIN17_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN17_PG_CONFIG , 1, hws->shifts->DOMAIN17_POWER_GATE, hws->masks-> DOMAIN17_POWER_GATE, power_gate) |
| 368 | DOMAIN17_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN17_PG_CONFIG , 1, hws->shifts->DOMAIN17_POWER_GATE, hws->masks-> DOMAIN17_POWER_GATE, power_gate); |
| 369 | |
| 370 | REG_WAIT(DOMAIN17_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN17_PG_STATUS , hws->shifts->DOMAIN17_PGFSM_PWR_STATUS, hws->masks ->DOMAIN17_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 372) |
| 371 | DOMAIN17_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN17_PG_STATUS , hws->shifts->DOMAIN17_PGFSM_PWR_STATUS, hws->masks ->DOMAIN17_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 372) |
| 372 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN17_PG_STATUS , hws->shifts->DOMAIN17_PGFSM_PWR_STATUS, hws->masks ->DOMAIN17_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 372); |
| 373 | break; |
| 374 | case 2: /* DSC2 */ |
| 375 | REG_UPDATE(DOMAIN18_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN18_PG_CONFIG , 1, hws->shifts->DOMAIN18_POWER_GATE, hws->masks-> DOMAIN18_POWER_GATE, power_gate) |
| 376 | DOMAIN18_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN18_PG_CONFIG , 1, hws->shifts->DOMAIN18_POWER_GATE, hws->masks-> DOMAIN18_POWER_GATE, power_gate); |
| 377 | |
| 378 | REG_WAIT(DOMAIN18_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN18_PG_STATUS , hws->shifts->DOMAIN18_PGFSM_PWR_STATUS, hws->masks ->DOMAIN18_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 380) |
| 379 | DOMAIN18_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN18_PG_STATUS , hws->shifts->DOMAIN18_PGFSM_PWR_STATUS, hws->masks ->DOMAIN18_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 380) |
| 380 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN18_PG_STATUS , hws->shifts->DOMAIN18_PGFSM_PWR_STATUS, hws->masks ->DOMAIN18_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 380); |
| 381 | break; |
| 382 | case 3: /* DSC3 */ |
| 383 | REG_UPDATE(DOMAIN19_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN19_PG_CONFIG , 1, hws->shifts->DOMAIN19_POWER_GATE, hws->masks-> DOMAIN19_POWER_GATE, power_gate) |
| 384 | DOMAIN19_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN19_PG_CONFIG , 1, hws->shifts->DOMAIN19_POWER_GATE, hws->masks-> DOMAIN19_POWER_GATE, power_gate); |
| 385 | |
| 386 | REG_WAIT(DOMAIN19_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN19_PG_STATUS , hws->shifts->DOMAIN19_PGFSM_PWR_STATUS, hws->masks ->DOMAIN19_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 388) |
| 387 | DOMAIN19_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN19_PG_STATUS , hws->shifts->DOMAIN19_PGFSM_PWR_STATUS, hws->masks ->DOMAIN19_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 388) |
| 388 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN19_PG_STATUS , hws->shifts->DOMAIN19_PGFSM_PWR_STATUS, hws->masks ->DOMAIN19_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 388); |
| 389 | break; |
| 390 | case 4: /* DSC4 */ |
| 391 | REG_UPDATE(DOMAIN20_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN20_PG_CONFIG , 1, hws->shifts->DOMAIN20_POWER_GATE, hws->masks-> DOMAIN20_POWER_GATE, power_gate) |
| 392 | DOMAIN20_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN20_PG_CONFIG , 1, hws->shifts->DOMAIN20_POWER_GATE, hws->masks-> DOMAIN20_POWER_GATE, power_gate); |
| 393 | |
| 394 | REG_WAIT(DOMAIN20_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN20_PG_STATUS , hws->shifts->DOMAIN20_PGFSM_PWR_STATUS, hws->masks ->DOMAIN20_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 396) |
| 395 | DOMAIN20_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN20_PG_STATUS , hws->shifts->DOMAIN20_PGFSM_PWR_STATUS, hws->masks ->DOMAIN20_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 396) |
| 396 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN20_PG_STATUS , hws->shifts->DOMAIN20_PGFSM_PWR_STATUS, hws->masks ->DOMAIN20_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 396); |
| 397 | break; |
| 398 | case 5: /* DSC5 */ |
| 399 | REG_UPDATE(DOMAIN21_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN21_PG_CONFIG , 1, hws->shifts->DOMAIN21_POWER_GATE, hws->masks-> DOMAIN21_POWER_GATE, power_gate) |
| 400 | DOMAIN21_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN21_PG_CONFIG , 1, hws->shifts->DOMAIN21_POWER_GATE, hws->masks-> DOMAIN21_POWER_GATE, power_gate); |
| 401 | |
| 402 | REG_WAIT(DOMAIN21_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN21_PG_STATUS , hws->shifts->DOMAIN21_PGFSM_PWR_STATUS, hws->masks ->DOMAIN21_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 404) |
| 403 | DOMAIN21_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN21_PG_STATUS , hws->shifts->DOMAIN21_PGFSM_PWR_STATUS, hws->masks ->DOMAIN21_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 404) |
| 404 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN21_PG_STATUS , hws->shifts->DOMAIN21_PGFSM_PWR_STATUS, hws->masks ->DOMAIN21_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__ , 404); |
| 405 | break; |
| 406 | default: |
| 407 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 407); do {} while (0); } while (0); |
| 408 | break; |
| 409 | } |
| 410 | |
| 411 | if (org_ip_request_cntl == 0) |
| 412 | REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0)generic_reg_set_ex(hws->ctx, hws->regs->DC_IP_REQUEST_CNTL , 0, 1, hws->shifts->IP_REQUEST_EN, hws->masks->IP_REQUEST_EN , 0); |
| 413 | } |
| 414 | |
| 415 | void dcn20_dpp_pg_control( |
| 416 | struct dce_hwseq *hws, |
| 417 | unsigned int dpp_inst, |
| 418 | bool_Bool power_on) |
| 419 | { |
| 420 | uint32_t power_gate = power_on ? 0 : 1; |
| 421 | uint32_t pwr_status = power_on ? 0 : 2; |
| 422 | |
| 423 | if (hws->ctx->dc->debug.disable_dpp_power_gate) |
| 424 | return; |
| 425 | if (REG(DOMAIN1_PG_CONFIG)hws->regs->DOMAIN1_PG_CONFIG == 0) |
| 426 | return; |
| 427 | |
| 428 | switch (dpp_inst) { |
| 429 | case 0: /* DPP0 */ |
| 430 | REG_UPDATE(DOMAIN1_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN1_PG_CONFIG , 1, hws->shifts->DOMAIN1_POWER_GATE, hws->masks-> DOMAIN1_POWER_GATE, power_gate) |
| 431 | DOMAIN1_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN1_PG_CONFIG , 1, hws->shifts->DOMAIN1_POWER_GATE, hws->masks-> DOMAIN1_POWER_GATE, power_gate); |
| 432 | |
| 433 | REG_WAIT(DOMAIN1_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN1_PG_STATUS , hws->shifts->DOMAIN1_PGFSM_PWR_STATUS, hws->masks-> DOMAIN1_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 435) |
| 434 | DOMAIN1_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN1_PG_STATUS , hws->shifts->DOMAIN1_PGFSM_PWR_STATUS, hws->masks-> DOMAIN1_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 435) |
| 435 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN1_PG_STATUS , hws->shifts->DOMAIN1_PGFSM_PWR_STATUS, hws->masks-> DOMAIN1_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 435); |
| 436 | break; |
| 437 | case 1: /* DPP1 */ |
| 438 | REG_UPDATE(DOMAIN3_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN3_PG_CONFIG , 1, hws->shifts->DOMAIN3_POWER_GATE, hws->masks-> DOMAIN3_POWER_GATE, power_gate) |
| 439 | DOMAIN3_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN3_PG_CONFIG , 1, hws->shifts->DOMAIN3_POWER_GATE, hws->masks-> DOMAIN3_POWER_GATE, power_gate); |
| 440 | |
| 441 | REG_WAIT(DOMAIN3_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN3_PG_STATUS , hws->shifts->DOMAIN3_PGFSM_PWR_STATUS, hws->masks-> DOMAIN3_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 443) |
| 442 | DOMAIN3_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN3_PG_STATUS , hws->shifts->DOMAIN3_PGFSM_PWR_STATUS, hws->masks-> DOMAIN3_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 443) |
| 443 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN3_PG_STATUS , hws->shifts->DOMAIN3_PGFSM_PWR_STATUS, hws->masks-> DOMAIN3_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 443); |
| 444 | break; |
| 445 | case 2: /* DPP2 */ |
| 446 | REG_UPDATE(DOMAIN5_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN5_PG_CONFIG , 1, hws->shifts->DOMAIN5_POWER_GATE, hws->masks-> DOMAIN5_POWER_GATE, power_gate) |
| 447 | DOMAIN5_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN5_PG_CONFIG , 1, hws->shifts->DOMAIN5_POWER_GATE, hws->masks-> DOMAIN5_POWER_GATE, power_gate); |
| 448 | |
| 449 | REG_WAIT(DOMAIN5_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN5_PG_STATUS , hws->shifts->DOMAIN5_PGFSM_PWR_STATUS, hws->masks-> DOMAIN5_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 451) |
| 450 | DOMAIN5_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN5_PG_STATUS , hws->shifts->DOMAIN5_PGFSM_PWR_STATUS, hws->masks-> DOMAIN5_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 451) |
| 451 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN5_PG_STATUS , hws->shifts->DOMAIN5_PGFSM_PWR_STATUS, hws->masks-> DOMAIN5_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 451); |
| 452 | break; |
| 453 | case 3: /* DPP3 */ |
| 454 | REG_UPDATE(DOMAIN7_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN7_PG_CONFIG , 1, hws->shifts->DOMAIN7_POWER_GATE, hws->masks-> DOMAIN7_POWER_GATE, power_gate) |
| 455 | DOMAIN7_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN7_PG_CONFIG , 1, hws->shifts->DOMAIN7_POWER_GATE, hws->masks-> DOMAIN7_POWER_GATE, power_gate); |
| 456 | |
| 457 | REG_WAIT(DOMAIN7_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN7_PG_STATUS , hws->shifts->DOMAIN7_PGFSM_PWR_STATUS, hws->masks-> DOMAIN7_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 459) |
| 458 | DOMAIN7_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN7_PG_STATUS , hws->shifts->DOMAIN7_PGFSM_PWR_STATUS, hws->masks-> DOMAIN7_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 459) |
| 459 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN7_PG_STATUS , hws->shifts->DOMAIN7_PGFSM_PWR_STATUS, hws->masks-> DOMAIN7_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 459); |
| 460 | break; |
| 461 | case 4: /* DPP4 */ |
| 462 | REG_UPDATE(DOMAIN9_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN9_PG_CONFIG , 1, hws->shifts->DOMAIN9_POWER_GATE, hws->masks-> DOMAIN9_POWER_GATE, power_gate) |
| 463 | DOMAIN9_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN9_PG_CONFIG , 1, hws->shifts->DOMAIN9_POWER_GATE, hws->masks-> DOMAIN9_POWER_GATE, power_gate); |
| 464 | |
| 465 | REG_WAIT(DOMAIN9_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN9_PG_STATUS , hws->shifts->DOMAIN9_PGFSM_PWR_STATUS, hws->masks-> DOMAIN9_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 467) |
| 466 | DOMAIN9_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN9_PG_STATUS , hws->shifts->DOMAIN9_PGFSM_PWR_STATUS, hws->masks-> DOMAIN9_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 467) |
| 467 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN9_PG_STATUS , hws->shifts->DOMAIN9_PGFSM_PWR_STATUS, hws->masks-> DOMAIN9_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 467); |
| 468 | break; |
| 469 | case 5: /* DPP5 */ |
| 470 | /* |
| 471 | * Do not power gate DPP5, should be left at HW default, power on permanently. |
| 472 | * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard |
| 473 | * reset. |
| 474 | * REG_UPDATE(DOMAIN11_PG_CONFIG, |
| 475 | * DOMAIN11_POWER_GATE, power_gate); |
| 476 | * |
| 477 | * REG_WAIT(DOMAIN11_PG_STATUS, |
| 478 | * DOMAIN11_PGFSM_PWR_STATUS, pwr_status, |
| 479 | * 1, 1000); |
| 480 | */ |
| 481 | break; |
| 482 | default: |
| 483 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 483); do {} while (0); } while (0); |
| 484 | break; |
| 485 | } |
| 486 | } |
| 487 | |
| 488 | |
| 489 | void dcn20_hubp_pg_control( |
| 490 | struct dce_hwseq *hws, |
| 491 | unsigned int hubp_inst, |
| 492 | bool_Bool power_on) |
| 493 | { |
| 494 | uint32_t power_gate = power_on ? 0 : 1; |
| 495 | uint32_t pwr_status = power_on ? 0 : 2; |
| 496 | |
| 497 | if (hws->ctx->dc->debug.disable_hubp_power_gate) |
| 498 | return; |
| 499 | if (REG(DOMAIN0_PG_CONFIG)hws->regs->DOMAIN0_PG_CONFIG == 0) |
| 500 | return; |
| 501 | |
| 502 | switch (hubp_inst) { |
| 503 | case 0: /* DCHUBP0 */ |
| 504 | REG_UPDATE(DOMAIN0_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN0_PG_CONFIG , 1, hws->shifts->DOMAIN0_POWER_GATE, hws->masks-> DOMAIN0_POWER_GATE, power_gate) |
| 505 | DOMAIN0_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN0_PG_CONFIG , 1, hws->shifts->DOMAIN0_POWER_GATE, hws->masks-> DOMAIN0_POWER_GATE, power_gate); |
| 506 | |
| 507 | REG_WAIT(DOMAIN0_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN0_PG_STATUS , hws->shifts->DOMAIN0_PGFSM_PWR_STATUS, hws->masks-> DOMAIN0_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 509) |
| 508 | DOMAIN0_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN0_PG_STATUS , hws->shifts->DOMAIN0_PGFSM_PWR_STATUS, hws->masks-> DOMAIN0_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 509) |
| 509 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN0_PG_STATUS , hws->shifts->DOMAIN0_PGFSM_PWR_STATUS, hws->masks-> DOMAIN0_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 509); |
| 510 | break; |
| 511 | case 1: /* DCHUBP1 */ |
| 512 | REG_UPDATE(DOMAIN2_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN2_PG_CONFIG , 1, hws->shifts->DOMAIN2_POWER_GATE, hws->masks-> DOMAIN2_POWER_GATE, power_gate) |
| 513 | DOMAIN2_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN2_PG_CONFIG , 1, hws->shifts->DOMAIN2_POWER_GATE, hws->masks-> DOMAIN2_POWER_GATE, power_gate); |
| 514 | |
| 515 | REG_WAIT(DOMAIN2_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN2_PG_STATUS , hws->shifts->DOMAIN2_PGFSM_PWR_STATUS, hws->masks-> DOMAIN2_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 517) |
| 516 | DOMAIN2_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN2_PG_STATUS , hws->shifts->DOMAIN2_PGFSM_PWR_STATUS, hws->masks-> DOMAIN2_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 517) |
| 517 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN2_PG_STATUS , hws->shifts->DOMAIN2_PGFSM_PWR_STATUS, hws->masks-> DOMAIN2_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 517); |
| 518 | break; |
| 519 | case 2: /* DCHUBP2 */ |
| 520 | REG_UPDATE(DOMAIN4_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN4_PG_CONFIG , 1, hws->shifts->DOMAIN4_POWER_GATE, hws->masks-> DOMAIN4_POWER_GATE, power_gate) |
| 521 | DOMAIN4_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN4_PG_CONFIG , 1, hws->shifts->DOMAIN4_POWER_GATE, hws->masks-> DOMAIN4_POWER_GATE, power_gate); |
| 522 | |
| 523 | REG_WAIT(DOMAIN4_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN4_PG_STATUS , hws->shifts->DOMAIN4_PGFSM_PWR_STATUS, hws->masks-> DOMAIN4_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 525) |
| 524 | DOMAIN4_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN4_PG_STATUS , hws->shifts->DOMAIN4_PGFSM_PWR_STATUS, hws->masks-> DOMAIN4_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 525) |
| 525 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN4_PG_STATUS , hws->shifts->DOMAIN4_PGFSM_PWR_STATUS, hws->masks-> DOMAIN4_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 525); |
| 526 | break; |
| 527 | case 3: /* DCHUBP3 */ |
| 528 | REG_UPDATE(DOMAIN6_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN6_PG_CONFIG , 1, hws->shifts->DOMAIN6_POWER_GATE, hws->masks-> DOMAIN6_POWER_GATE, power_gate) |
| 529 | DOMAIN6_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN6_PG_CONFIG , 1, hws->shifts->DOMAIN6_POWER_GATE, hws->masks-> DOMAIN6_POWER_GATE, power_gate); |
| 530 | |
| 531 | REG_WAIT(DOMAIN6_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN6_PG_STATUS , hws->shifts->DOMAIN6_PGFSM_PWR_STATUS, hws->masks-> DOMAIN6_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 533) |
| 532 | DOMAIN6_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN6_PG_STATUS , hws->shifts->DOMAIN6_PGFSM_PWR_STATUS, hws->masks-> DOMAIN6_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 533) |
| 533 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN6_PG_STATUS , hws->shifts->DOMAIN6_PGFSM_PWR_STATUS, hws->masks-> DOMAIN6_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 533); |
| 534 | break; |
| 535 | case 4: /* DCHUBP4 */ |
| 536 | REG_UPDATE(DOMAIN8_PG_CONFIG,generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN8_PG_CONFIG , 1, hws->shifts->DOMAIN8_POWER_GATE, hws->masks-> DOMAIN8_POWER_GATE, power_gate) |
| 537 | DOMAIN8_POWER_GATE, power_gate)generic_reg_update_ex(hws->ctx, hws->regs->DOMAIN8_PG_CONFIG , 1, hws->shifts->DOMAIN8_POWER_GATE, hws->masks-> DOMAIN8_POWER_GATE, power_gate); |
| 538 | |
| 539 | REG_WAIT(DOMAIN8_PG_STATUS,generic_reg_wait(hws->ctx, hws->regs->DOMAIN8_PG_STATUS , hws->shifts->DOMAIN8_PGFSM_PWR_STATUS, hws->masks-> DOMAIN8_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 541) |
| 540 | DOMAIN8_PGFSM_PWR_STATUS, pwr_status,generic_reg_wait(hws->ctx, hws->regs->DOMAIN8_PG_STATUS , hws->shifts->DOMAIN8_PGFSM_PWR_STATUS, hws->masks-> DOMAIN8_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 541) |
| 541 | 1, 1000)generic_reg_wait(hws->ctx, hws->regs->DOMAIN8_PG_STATUS , hws->shifts->DOMAIN8_PGFSM_PWR_STATUS, hws->masks-> DOMAIN8_PGFSM_PWR_STATUS, pwr_status, 1, 1000, __func__, 541); |
| 542 | break; |
| 543 | case 5: /* DCHUBP5 */ |
| 544 | /* |
| 545 | * Do not power gate DCHUB5, should be left at HW default, power on permanently. |
| 546 | * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard |
| 547 | * reset. |
| 548 | * REG_UPDATE(DOMAIN10_PG_CONFIG, |
| 549 | * DOMAIN10_POWER_GATE, power_gate); |
| 550 | * |
| 551 | * REG_WAIT(DOMAIN10_PG_STATUS, |
| 552 | * DOMAIN10_PGFSM_PWR_STATUS, pwr_status, |
| 553 | * 1, 1000); |
| 554 | */ |
| 555 | break; |
| 556 | default: |
| 557 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 557); do {} while (0); } while (0); |
| 558 | break; |
| 559 | } |
| 560 | } |
| 561 | |
| 562 | |
| 563 | /* disable HW used by plane. |
| 564 | * note: cannot disable until disconnect is complete |
| 565 | */ |
| 566 | void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) |
| 567 | { |
| 568 | struct dce_hwseq *hws = dc->hwseq; |
| 569 | struct hubp *hubp = pipe_ctx->plane_res.hubp; |
| 570 | struct dpp *dpp = pipe_ctx->plane_res.dpp; |
| 571 | |
| 572 | dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx); |
| 573 | |
| 574 | /* In flip immediate with pipe splitting case GSL is used for |
| 575 | * synchronization so we must disable it when the plane is disabled. |
| 576 | */ |
| 577 | if (pipe_ctx->stream_res.gsl_group != 0) |
| 578 | dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false0); |
| 579 | |
| 580 | dc->hwss.set_flip_control_gsl(pipe_ctx, false0); |
| 581 | |
| 582 | hubp->funcs->hubp_clk_cntl(hubp, false0); |
| 583 | |
| 584 | dpp->funcs->dpp_dppclk_control(dpp, false0, false0); |
| 585 | |
| 586 | hubp->power_gated = true1; |
| 587 | |
| 588 | hws->funcs.plane_atomic_power_down(dc, |
| 589 | pipe_ctx->plane_res.dpp, |
| 590 | pipe_ctx->plane_res.hubp); |
| 591 | |
| 592 | pipe_ctx->stream = NULL((void *)0); |
| 593 | memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res))__builtin_memset((&pipe_ctx->stream_res), (0), (sizeof (pipe_ctx->stream_res))); |
| 594 | memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res))__builtin_memset((&pipe_ctx->plane_res), (0), (sizeof( pipe_ctx->plane_res))); |
| 595 | pipe_ctx->top_pipe = NULL((void *)0); |
| 596 | pipe_ctx->bottom_pipe = NULL((void *)0); |
| 597 | pipe_ctx->plane_state = NULL((void *)0); |
| 598 | } |
| 599 | |
| 600 | |
| 601 | void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) |
| 602 | { |
| 603 | DC_LOGGER_INIT(dc->ctx->logger); |
| 604 | |
| 605 | if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) |
| 606 | return; |
| 607 | |
| 608 | dcn20_plane_atomic_disable(dc, pipe_ctx); |
| 609 | |
| 610 | DC_LOG_DC("Power down front end %d\n",__drm_dbg(DRM_UT_KMS, "Power down front end %d\n", pipe_ctx-> pipe_idx) |
| 611 | pipe_ctx->pipe_idx)__drm_dbg(DRM_UT_KMS, "Power down front end %d\n", pipe_ctx-> pipe_idx); |
| 612 | } |
| 613 | |
| 614 | #if defined(CONFIG_DRM_AMD_DC_DCN3_01) |
| 615 | static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, |
| 616 | int opp_cnt) |
| 617 | { |
| 618 | bool_Bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing); |
| 619 | int flow_ctrl_cnt; |
| 620 | |
| 621 | if (opp_cnt >= 2) |
| 622 | hblank_halved = true1; |
| 623 | |
| 624 | flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable - |
| 625 | stream->timing.h_border_left - |
| 626 | stream->timing.h_border_right; |
| 627 | |
| 628 | if (hblank_halved) |
| 629 | flow_ctrl_cnt /= 2; |
| 630 | |
| 631 | /* ODM combine 4:1 case */ |
| 632 | if (opp_cnt == 4) |
| 633 | flow_ctrl_cnt /= 2; |
| 634 | |
| 635 | return flow_ctrl_cnt; |
| 636 | } |
| 637 | #endif |
| 638 | |
| 639 | enum dc_status dcn20_enable_stream_timing( |
| 640 | struct pipe_ctx *pipe_ctx, |
| 641 | struct dc_state *context, |
| 642 | struct dc *dc) |
| 643 | { |
| 644 | struct dce_hwseq *hws = dc->hwseq; |
| 645 | struct dc_stream_state *stream = pipe_ctx->stream; |
| 646 | struct drr_params params = {0}; |
| 647 | unsigned int event_triggers = 0; |
| 648 | struct pipe_ctx *odm_pipe; |
| 649 | int opp_cnt = 1; |
| 650 | int opp_inst[MAX_PIPES6] = { pipe_ctx->stream_res.opp->inst }; |
| 651 | |
| 652 | #if defined(CONFIG_DRM_AMD_DC_DCN3_01) |
| 653 | bool_Bool interlace = stream->timing.flags.INTERLACE; |
| 654 | int i; |
| 655 | |
| 656 | struct mpc_dwb_flow_control flow_control; |
| 657 | struct mpc *mpc = dc->res_pool->mpc; |
| 658 | bool_Bool rate_control_2x_pclk = (interlace || optc2_is_two_pixels_per_containter(&stream->timing)); |
| 659 | |
| 660 | #endif |
| 661 | /* In the upper caller's loop, pipe0 is the parent pipe and is called first. |
| 662 | * The back end is set up for pipe0; other child pipes share the back end |
| 663 | * with pipe 0, so no programming is needed for them. |
| 664 | */ |
| 665 | if (pipe_ctx->top_pipe != NULL((void *)0)) |
| 666 | return DC_OK; |
| 667 | |
| 668 | /* TODO check if timing_changed, disable stream if timing changed */ |
| 669 | |
| 670 | for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { |
| 671 | opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst; |
| 672 | opp_cnt++; |
| 673 | } |
| 674 | |
| 675 | if (opp_cnt > 1) |
| 676 | pipe_ctx->stream_res.tg->funcs->set_odm_combine( |
| 677 | pipe_ctx->stream_res.tg, |
| 678 | opp_inst, opp_cnt, |
| 679 | &pipe_ctx->stream->timing); |
| 680 | |
| 681 | /* HW programming guide assumes the display has already been disabled |
| 682 | * by the unplug sequence and that the OTG is stopped. |
| 683 | */ |
| 684 | pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true1); |
| 685 | |
| 686 | if (false0 == pipe_ctx->clock_source->funcs->program_pix_clk( |
| 687 | pipe_ctx->clock_source, |
| 688 | &pipe_ctx->stream_res.pix_clk_params, |
| 689 | &pipe_ctx->pll_settings)) { |
| 690 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 690); do {} while (0); } while (0); |
| 691 | return DC_ERROR_UNEXPECTED; |
| 692 | } |
| 693 | |
| 694 | if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) |
| 695 | dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); |
| 696 | |
| 697 | pipe_ctx->stream_res.tg->funcs->program_timing( |
| 698 | pipe_ctx->stream_res.tg, |
| 699 | &stream->timing, |
| 700 | pipe_ctx->pipe_dlg_param.vready_offset, |
| 701 | pipe_ctx->pipe_dlg_param.vstartup_start, |
| 702 | pipe_ctx->pipe_dlg_param.vupdate_offset, |
| 703 | pipe_ctx->pipe_dlg_param.vupdate_width, |
| 704 | pipe_ctx->stream->signal, |
| 705 | true1); |
| 706 | |
| 707 | #if defined(CONFIG_DRM_AMD_DC_DCN3_01) |
| 708 | rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1; |
| 709 | flow_control.flow_ctrl_mode = 0; |
| 710 | flow_control.flow_ctrl_cnt0 = 0x80; |
| 711 | flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(stream, opp_cnt); |
| 712 | if (mpc->funcs->set_out_rate_control) { |
| 713 | for (i = 0; i < opp_cnt; ++i) { |
| 714 | mpc->funcs->set_out_rate_control( |
| 715 | mpc, opp_inst[i], |
| 716 | true1, |
| 717 | rate_control_2x_pclk, |
| 718 | &flow_control); |
| 719 | } |
| 720 | } |
| 721 | #endif |
| 722 | for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) |
| 723 | odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( |
| 724 | odm_pipe->stream_res.opp, |
| 725 | true1); |
| 726 | |
| 727 | pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( |
| 728 | pipe_ctx->stream_res.opp, |
| 729 | true1); |
| 730 | |
| 731 | hws->funcs.blank_pixel_data(dc, pipe_ctx, true1); |
| 732 | |
| 733 | /* VTG is within DCHUB command block. DCFCLK is always on */ |
| 734 | if (false0 == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) { |
| 735 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 735); do {} while (0); } while (0); |
| 736 | return DC_ERROR_UNEXPECTED; |
| 737 | } |
| 738 | |
| 739 | hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); |
| 740 | |
| 741 | params.vertical_total_min = stream->adjust.v_total_min; |
| 742 | params.vertical_total_max = stream->adjust.v_total_max; |
| 743 | params.vertical_total_mid = stream->adjust.v_total_mid; |
| 744 | params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num; |
| 745 | if (pipe_ctx->stream_res.tg->funcs->set_drr) |
| 746 | pipe_ctx->stream_res.tg->funcs->set_drr( |
| 747 | pipe_ctx->stream_res.tg, ¶ms); |
| 748 | |
| 749 | // DRR should set trigger event to monitor surface update event |
| 750 | if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) |
| 751 | event_triggers = 0x80; |
| 752 | /* Event triggers and num frames initialized for DRR, but can be |
| 753 | * later updated for PSR use. Note DRR trigger events are generated |
| 754 | * regardless of whether num frames met. |
| 755 | */ |
| 756 | if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) |
| 757 | pipe_ctx->stream_res.tg->funcs->set_static_screen_control( |
| 758 | pipe_ctx->stream_res.tg, event_triggers, 2); |
| 759 | |
| 760 | /* TODO program crtc source select for non-virtual signal*/ |
| 761 | /* TODO program FMT */ |
| 762 | /* TODO setup link_enc */ |
| 763 | /* TODO set stream attributes */ |
| 764 | /* TODO program audio */ |
| 765 | /* TODO enable stream if timing changed */ |
| 766 | /* TODO unblank stream if DP */ |
| 767 | |
| 768 | return DC_OK; |
| 769 | } |
| 770 | |
| 771 | void dcn20_program_output_csc(struct dc *dc, |
| 772 | struct pipe_ctx *pipe_ctx, |
| 773 | enum dc_color_space colorspace, |
| 774 | uint16_t *matrix, |
| 775 | int opp_id) |
| 776 | { |
| 777 | struct mpc *mpc = dc->res_pool->mpc; |
| 778 | enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A; |
| 779 | int mpcc_id = pipe_ctx->plane_res.hubp->inst; |
| 780 | |
| 781 | if (mpc->funcs->power_on_mpc_mem_pwr) |
| 782 | mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true1); |
| 783 | |
| 784 | if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true1) { |
| 785 | if (mpc->funcs->set_output_csc != NULL((void *)0)) |
| 786 | mpc->funcs->set_output_csc(mpc, |
| 787 | opp_id, |
| 788 | matrix, |
| 789 | ocsc_mode); |
| 790 | } else { |
| 791 | if (mpc->funcs->set_ocsc_default != NULL((void *)0)) |
| 792 | mpc->funcs->set_ocsc_default(mpc, |
| 793 | opp_id, |
| 794 | colorspace, |
| 795 | ocsc_mode); |
| 796 | } |
| 797 | } |
| 798 | |
| 799 | bool_Bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, |
| 800 | const struct dc_stream_state *stream) |
| 801 | { |
| 802 | int mpcc_id = pipe_ctx->plane_res.hubp->inst; |
| 803 | struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; |
| 804 | struct pwl_params *params = NULL((void *)0); |
| 805 | /* |
| 806 | * program OGAM only for the top pipe |
| 807 | * if there is a pipe split then fix diagnostic is required: |
| 808 | * how to pass OGAM parameter for stream. |
| 809 | * if programming for all pipes is required then remove condition |
| 810 | * pipe_ctx->top_pipe == NULL ,but then fix the diagnostic. |
| 811 | */ |
| 812 | if (mpc->funcs->power_on_mpc_mem_pwr) |
| 813 | mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true1); |
| 814 | if (pipe_ctx->top_pipe == NULL((void *)0) |
| 815 | && mpc->funcs->set_output_gamma && stream->out_transfer_func) { |
| 816 | if (stream->out_transfer_func->type == TF_TYPE_HWPWL) |
| 817 | params = &stream->out_transfer_func->pwl; |
| 818 | else if (pipe_ctx->stream->out_transfer_func->type == |
| 819 | TF_TYPE_DISTRIBUTED_POINTS && |
| 820 | cm_helper_translate_curve_to_hw_format( |
| 821 | stream->out_transfer_func, |
| 822 | &mpc->blender_params, false0)) |
| 823 | params = &mpc->blender_params; |
| 824 | /* |
| 825 | * there is no ROM |
| 826 | */ |
| 827 | if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) |
| 828 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 828); do {} while (0); } while (0); |
| 829 | } |
| 830 | /* |
| 831 | * if the above block is not executed, 'params' remains NULL and output gamma is set to bypass |
| 832 | */ |
| 833 | mpc->funcs->set_output_gamma(mpc, mpcc_id, params); |
| 834 | |
| 835 | return true1; |
| 836 | } |
| 837 | |
| 838 | bool_Bool dcn20_set_blend_lut( |
| 839 | struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) |
| 840 | { |
| 841 | struct dpp *dpp_base = pipe_ctx->plane_res.dpp; |
| 842 | bool_Bool result = true1; |
| 843 | struct pwl_params *blend_lut = NULL((void *)0); |
| 844 | |
| 845 | if (plane_state->blend_tf) { |
| 846 | if (plane_state->blend_tf->type == TF_TYPE_HWPWL) |
| 847 | blend_lut = &plane_state->blend_tf->pwl; |
| 848 | else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { |
| 849 | cm_helper_translate_curve_to_hw_format( |
| 850 | plane_state->blend_tf, |
| 851 | &dpp_base->regamma_params, false0); |
| 852 | blend_lut = &dpp_base->regamma_params; |
| 853 | } |
| 854 | } |
| 855 | result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut); |
| 856 | |
| 857 | return result; |
| 858 | } |
| 859 | |
| 860 | bool_Bool dcn20_set_shaper_3dlut( |
| 861 | struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) |
| 862 | { |
| 863 | struct dpp *dpp_base = pipe_ctx->plane_res.dpp; |
| 864 | bool_Bool result = true1; |
| 865 | struct pwl_params *shaper_lut = NULL((void *)0); |
| 866 | |
| 867 | if (plane_state->in_shaper_func) { |
| 868 | if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) |
| 869 | shaper_lut = &plane_state->in_shaper_func->pwl; |
| 870 | else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { |
| 871 | cm_helper_translate_curve_to_hw_format( |
| 872 | plane_state->in_shaper_func, |
| 873 | &dpp_base->shaper_params, true1); |
| 874 | shaper_lut = &dpp_base->shaper_params; |
| 875 | } |
| 876 | } |
| 877 | |
| 878 | result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut); |
Value stored to 'result' is never read | |
| 879 | if (plane_state->lut3d_func && |
| 880 | plane_state->lut3d_func->state.bits.initialized == 1) |
| 881 | result = dpp_base->funcs->dpp_program_3dlut(dpp_base, |
| 882 | &plane_state->lut3d_func->lut_3d); |
| 883 | else |
| 884 | result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL((void *)0)); |
| 885 | |
| 886 | return result; |
| 887 | } |
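
The warning reported at line 878 is a dead store: 'result' is assigned the status of dpp_program_shaper_lut() and then unconditionally overwritten by the dpp_program_3dlut() call at lines 881/884 before it is ever read, so the shaper-LUT status is silently discarded. A minimal sketch of one way to address the finding, assuming the shaper-LUT status should be propagated to the caller (the other option is simply dropping the dead assignment); this is an illustrative rework, not necessarily how the driver resolves it:

    /* Hypothetical rework: fold both programming statuses into 'result'
     * so the function returns false if either LUT fails to program. */
    result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);

    if (plane_state->lut3d_func &&
        plane_state->lut3d_func->state.bits.initialized == 1)
        result &= dpp_base->funcs->dpp_program_3dlut(dpp_base,
                &plane_state->lut3d_func->lut_3d);
    else
        result &= dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);

    return result;

Either variant removes the never-read store without changing what the hardware is programmed with; the difference is only whether a shaper-LUT failure is visible to the caller.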
| 888 | |
| 889 | bool_Bool dcn20_set_input_transfer_func(struct dc *dc, |
| 890 | struct pipe_ctx *pipe_ctx, |
| 891 | const struct dc_plane_state *plane_state) |
| 892 | { |
| 893 | struct dce_hwseq *hws = dc->hwseq; |
| 894 | struct dpp *dpp_base = pipe_ctx->plane_res.dpp; |
| 895 | const struct dc_transfer_func *tf = NULL((void *)0); |
| 896 | bool_Bool result = true1; |
| 897 | bool_Bool use_degamma_ram = false0; |
| 898 | |
| 899 | if (dpp_base == NULL((void *)0) || plane_state == NULL((void *)0)) |
| 900 | return false0; |
| 901 | |
| 902 | hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state); |
| 903 | hws->funcs.set_blend_lut(pipe_ctx, plane_state); |
| 904 | |
| 905 | if (plane_state->in_transfer_func) |
| 906 | tf = plane_state->in_transfer_func; |
| 907 | |
| 908 | |
| 909 | if (tf == NULL((void *)0)) { |
| 910 | dpp_base->funcs->dpp_set_degamma(dpp_base, |
| 911 | IPP_DEGAMMA_MODE_BYPASS); |
| 912 | return true1; |
| 913 | } |
| 914 | |
| 915 | if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS) |
| 916 | use_degamma_ram = true1; |
| 917 | |
| 918 | if (use_degamma_ram == true1) { |
| 919 | if (tf->type == TF_TYPE_HWPWL) |
| 920 | dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, |
| 921 | &tf->pwl); |
| 922 | else if (tf->type == TF_TYPE_DISTRIBUTED_POINTS) { |
| 923 | cm_helper_translate_curve_to_degamma_hw_format(tf, |
| 924 | &dpp_base->degamma_params); |
| 925 | dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, |
| 926 | &dpp_base->degamma_params); |
| 927 | } |
| 928 | return true1; |
| 929 | } |
| 930 | /* handle here the optimized cases when de-gamma ROM could be used. |
| 931 | * |
| 932 | */ |
| 933 | if (tf->type == TF_TYPE_PREDEFINED) { |
| 934 | switch (tf->tf) { |
| 935 | case TRANSFER_FUNCTION_SRGB: |
| 936 | dpp_base->funcs->dpp_set_degamma(dpp_base, |
| 937 | IPP_DEGAMMA_MODE_HW_sRGB); |
| 938 | break; |
| 939 | case TRANSFER_FUNCTION_BT709: |
| 940 | dpp_base->funcs->dpp_set_degamma(dpp_base, |
| 941 | IPP_DEGAMMA_MODE_HW_xvYCC); |
| 942 | break; |
| 943 | case TRANSFER_FUNCTION_LINEAR: |
| 944 | dpp_base->funcs->dpp_set_degamma(dpp_base, |
| 945 | IPP_DEGAMMA_MODE_BYPASS); |
| 946 | break; |
| 947 | case TRANSFER_FUNCTION_PQ: |
| 948 | dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL); |
| 949 | cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params); |
| 950 | dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params); |
| 951 | result = true1; |
| 952 | break; |
| 953 | default: |
| 954 | result = false0; |
| 955 | break; |
| 956 | } |
| 957 | } else if (tf->type == TF_TYPE_BYPASS) |
| 958 | dpp_base->funcs->dpp_set_degamma(dpp_base, |
| 959 | IPP_DEGAMMA_MODE_BYPASS); |
| 960 | else { |
| 961 | /* |
| 962 | * if we are here, we did not handle correctly. |
| 963 | * fix is required for this use case |
| 964 | */ |
| 965 | BREAK_TO_DEBUGGER()do { __drm_dbg(DRM_UT_DRIVER, "%s():%d\n", __func__, 965); do {} while (0); } while (0); |
| 966 | dpp_base->funcs->dpp_set_degamma(dpp_base, |
| 967 | IPP_DEGAMMA_MODE_BYPASS); |
| 968 | } |
| 969 | |
| 970 | return result; |
| 971 | } |
| 972 | |
| 973 | void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) |
| 974 | { |
| 975 | struct pipe_ctx *odm_pipe; |
| 976 | int opp_cnt = 1; |
| 977 | int opp_inst[MAX_PIPES6] = { pipe_ctx->stream_res.opp->inst }; |
| 978 | |
| 979 | for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { |
| 980 | opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst; |
| 981 | opp_cnt++; |
| 982 | } |
| 983 | |
| 984 | if (opp_cnt > 1) |
| 985 | pipe_ctx->stream_res.tg->funcs->set_odm_combine( |
| 986 | pipe_ctx->stream_res.tg, |
| 987 | opp_inst, opp_cnt, |
| 988 | &pipe_ctx->stream->timing); |
| 989 | else |
| 990 | pipe_ctx->stream_res.tg->funcs->set_odm_bypass( |
| 991 | pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); |
| 992 | } |
| 993 | |
| 994 | void dcn20_blank_pixel_data( |
| 995 | struct dc *dc, |
| 996 | struct pipe_ctx *pipe_ctx, |
| 997 | bool_Bool blank) |
| 998 | { |
| 999 | struct tg_color black_color = {0}; |
| 1000 | struct stream_resource *stream_res = &pipe_ctx->stream_res; |
| 1001 | struct dc_stream_state *stream = pipe_ctx->stream; |
| 1002 | enum dc_color_space color_space = stream->output_color_space; |
| 1003 | enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR; |
| 1004 | enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; |
| 1005 | struct pipe_ctx *odm_pipe; |
| 1006 | int odm_cnt = 1; |
| 1007 | |
| 1008 | int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; |
| 1009 | int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; |
| 1010 | |
| 1011 | if (stream->link->test_pattern_enabled) |
| 1012 | return; |
| 1013 | |
| 1014 | /* get opp dpg blank color */ |
| 1015 | color_space_to_black_color(dc, color_space, &black_color); |
| 1016 | |
| 1017 | for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) |
| 1018 | odm_cnt++; |
| 1019 | |
| 1020 | width = width / odm_cnt; |
| 1021 | |
| 1022 | if (blank) { |
| 1023 | dc->hwss.set_abm_immediate_disable(pipe_ctx); |
| 1024 | |
| 1025 | if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) { |
| 1026 | test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; |
| 1027 | test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; |
| 1028 | } |
| 1029 | } else { |
| 1030 | test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; |
| 1031 | } |
| 1032 | |
| 1033 | stream_res->opp->funcs->opp_set_disp_pattern_generator( |
| 1034 | stream_res->opp, |
| 1035 | test_pattern, |
| 1036 | test_pattern_color_space, |
| 1037 | stream->timing.display_color_depth, |
| 1038 | &black_color, |
| 1039 | width, |
| 1040 | height, |
| 1041 | 0); |
| 1042 | |
| 1043 | for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { |
| 1044 | odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator( |
| 1045 | odm_pipe->stream_res.opp, |
| 1046 | dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ? |
| 1047 | CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, |
| 1048 | test_pattern_color_space, |
| 1049 | stream->timing.display_color_depth, |
| 1050 | &black_color, |
| 1051 | width, |
| 1052 | height, |
| 1053 | 0); |
| 1054 | } |
| 1055 | |
| 1056 | if (!blank) |
| 1057 | if (stream_res->abm) { |
| 1058 | dc->hwss.set_pipe(pipe_ctx); |
| 1059 | stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level); |
| 1060 | } |
| 1061 | } |
| 1062 | |
| 1063 | |
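| | /*
| | * dcn20_power_on_plane: briefly raises IP_REQUEST_EN so the DPP and HUBP
| | * power-gating controls for this pipe can be un-gated, then drops the
| | * request bit again.
| | */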
| 1064 | static void dcn20_power_on_plane( |
| 1065 | struct dce_hwseq *hws, |
| 1066 | struct pipe_ctx *pipe_ctx) |
| 1067 | { |
| 1068 | DC_LOGGER_INIT(hws->ctx->logger); |
| 1069 | if (REG(DC_IP_REQUEST_CNTL)) {
| 1070 | REG_SET(DC_IP_REQUEST_CNTL, 0,
| 1071 | IP_REQUEST_EN, 1);
| 1072 | 
| 1073 | if (hws->funcs.dpp_pg_control)
| 1074 | hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
| 1075 | 
| 1076 | if (hws->funcs.hubp_pg_control)
| 1077 | hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
| 1078 | 
| 1079 | REG_SET(DC_IP_REQUEST_CNTL, 0,
| 1080 | IP_REQUEST_EN, 0);
| 1081 | DC_LOG_DEBUG(
| 1082 | "Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst);
| 1083 | } |
| 1084 | } |
| 1085 | |
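| | /*
| | * dcn20_enable_plane: front-end bring-up for a pipe: un-gate power, enable
| | * the DCHUB clock, re-initialize HUBP, enable the OPP pipe clock, and, when
| | * a valid VM physical address config exists, program the system aperture.
| | */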
| 1086 | void dcn20_enable_plane( |
| 1087 | struct dc *dc, |
| 1088 | struct pipe_ctx *pipe_ctx, |
| 1089 | struct dc_state *context) |
| 1090 | { |
| 1091 | //if (dc->debug.sanity_checks) { |
| 1092 | // dcn10_verify_allow_pstate_change_high(dc); |
| 1093 | //} |
| 1094 | dcn20_power_on_plane(dc->hwseq, pipe_ctx); |
| 1095 | |
| 1096 | /* enable DCFCLK current DCHUB */ |
| 1097 | pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
| 1098 | |
| 1099 | /* initialize HUBP on power up */ |
| 1100 | pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp); |
| 1101 | |
| 1102 | /* make sure OPP_PIPE_CLOCK_EN = 1 */ |
| 1103 | pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( |
| 1104 | pipe_ctx->stream_res.opp, |
| 1105 | true);
| 1106 | |
| 1107 | /* TODO: enable/disable in dm as per update type. |
| 1108 | if (plane_state) { |
| 1109 | DC_LOG_DC(dc->ctx->logger, |
| 1110 | "Pipe:%d 0x%x: addr hi:0x%x, " |
| 1111 | "addr low:0x%x, " |
| 1112 | "src: %d, %d, %d," |
| 1113 | " %d; dst: %d, %d, %d, %d;\n", |
| 1114 | pipe_ctx->pipe_idx, |
| 1115 | plane_state, |
| 1116 | plane_state->address.grph.addr.high_part, |
| 1117 | plane_state->address.grph.addr.low_part, |
| 1118 | plane_state->src_rect.x, |
| 1119 | plane_state->src_rect.y, |
| 1120 | plane_state->src_rect.width, |
| 1121 | plane_state->src_rect.height, |
| 1122 | plane_state->dst_rect.x, |
| 1123 | plane_state->dst_rect.y, |
| 1124 | plane_state->dst_rect.width, |
| 1125 | plane_state->dst_rect.height); |
| 1126 | |
| 1127 | DC_LOG_DC(dc->ctx->logger, |
| 1128 | "Pipe %d: width, height, x, y format:%d\n" |
| 1129 | "viewport:%d, %d, %d, %d\n" |
| 1130 | "recout: %d, %d, %d, %d\n", |
| 1131 | pipe_ctx->pipe_idx, |
| 1132 | plane_state->format, |
| 1133 | pipe_ctx->plane_res.scl_data.viewport.width, |
| 1134 | pipe_ctx->plane_res.scl_data.viewport.height, |
| 1135 | pipe_ctx->plane_res.scl_data.viewport.x, |
| 1136 | pipe_ctx->plane_res.scl_data.viewport.y, |
| 1137 | pipe_ctx->plane_res.scl_data.recout.width, |
| 1138 | pipe_ctx->plane_res.scl_data.recout.height, |
| 1139 | pipe_ctx->plane_res.scl_data.recout.x, |
| 1140 | pipe_ctx->plane_res.scl_data.recout.y); |
| 1141 | print_rq_dlg_ttu(dc, pipe_ctx); |
| 1142 | } |
| 1143 | */ |
| 1144 | if (dc->vm_pa_config.valid) { |
| 1145 | struct vm_system_aperture_param apt; |
| 1146 | |
| 1147 | apt.sys_default.quad_part = 0; |
| 1148 | |
| 1149 | apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr; |
| 1150 | apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr; |
| 1151 | |
| 1152 | // Program system aperture settings |
| 1153 | pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); |
| 1154 | } |
| 1155 | |
| 1156 | // if (dc->debug.sanity_checks) { |
| 1157 | // dcn10_verify_allow_pstate_change_high(dc); |
| 1158 | // } |
| 1159 | } |
| 1160 | |
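| | /*
| | * dcn20_pipe_control_lock: locks/unlocks the timing generator for the top
| | * pipe only. For immediate flips it first waits for pending flips on the
| | * top and bottom pipes, sets up GSL-based locking for split pipes when the
| | * flip type changes, and prefers the DMUB lock manager or the triple-buffer
| | * lock when those paths apply.
| | */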
| 1161 | void dcn20_pipe_control_lock( |
| 1162 | struct dc *dc, |
| 1163 | struct pipe_ctx *pipe, |
| 1164 | bool lock)
| 1165 | {
| 1166 | bool flip_immediate = false;
| 1167 | |
| 1168 | /* use TG master update lock to lock everything on the TG,
| 1169 | * therefore only the top pipe needs to lock
| 1170 | */
| 1171 | if (!pipe || pipe->top_pipe) |
| 1172 | return; |
| 1173 | |
| 1174 | if (pipe->plane_state != NULL)
| 1175 | flip_immediate = pipe->plane_state->flip_immediate; |
| 1176 | |
| 1177 | if (flip_immediate && lock) { |
| 1178 | const int TIMEOUT_FOR_FLIP_PENDING = 100000; |
| 1179 | int i; |
| 1180 | |
| 1181 | for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) { |
| 1182 | if (!pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) |
| 1183 | break; |
| 1184 | udelay(1); |
| 1185 | } |
| 1186 | |
| 1187 | if (pipe->bottom_pipe != NULL) {
| 1188 | for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) { |
| 1189 | if (!pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) |
| 1190 | break; |
| 1191 | udelay(1); |
| 1192 | } |
| 1193 | } |
| 1194 | } |
| 1195 | |
| 1196 | /* In flip immediate and pipe splitting case, we need to use GSL |
| 1197 | * for synchronization. Only do setup on locking and on flip type change. |
| 1198 | */ |
| 1199 | if (lock && pipe->bottom_pipe != NULL)
| 1200 | if ((flip_immediate && pipe->stream_res.gsl_group == 0) || |
| 1201 | (!flip_immediate && pipe->stream_res.gsl_group > 0)) |
| 1202 | dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate); |
| 1203 | |
| 1204 | if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) { |
| 1205 | union dmub_hw_lock_flags hw_locks = { 0 }; |
| 1206 | struct dmub_hw_lock_inst_flags inst_flags = { 0 }; |
| 1207 | |
| 1208 | hw_locks.bits.lock_pipe = 1; |
| 1209 | inst_flags.otg_inst = pipe->stream_res.tg->inst; |
| 1210 | |
| 1211 | if (pipe->plane_state != NULL)
| 1212 | hw_locks.bits.triple_buffer_lock = pipe->plane_state->triplebuffer_flips; |
| 1213 | |
| 1214 | dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, |
| 1215 | lock, |
| 1216 | &hw_locks, |
| 1217 | &inst_flags); |
| 1218 | } else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
| 1219 | if (lock) |
| 1220 | pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg); |
| 1221 | else |
| 1222 | pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg); |
| 1223 | } else { |
| 1224 | if (lock) |
| 1225 | pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); |
| 1226 | else |
| 1227 | pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); |
| 1228 | } |
| 1229 | } |
| 1230 | |
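| | /*
| | * dcn20_detect_pipe_changes: compares the old and new pipe contexts and
| | * fills new_pipe->update_flags. Enable/disable are detected from the
| | * presence of a plane state; the remaining bits (odm, global_sync, opp/tg
| | * change, mpcc, dppclk, scaler, viewport, dlg/ttu/rq) are set from field
| | * comparisons so later programming only touches what changed.
| | */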
| 1231 | static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe) |
| 1232 | { |
| 1233 | new_pipe->update_flags.raw = 0; |
| 1234 | |
| 1235 | /* Exit on unchanged, unused pipe */ |
| 1236 | if (!old_pipe->plane_state && !new_pipe->plane_state) |
| 1237 | return; |
| 1238 | /* Detect pipe enable/disable */ |
| 1239 | if (!old_pipe->plane_state && new_pipe->plane_state) { |
| 1240 | new_pipe->update_flags.bits.enable = 1; |
| 1241 | new_pipe->update_flags.bits.mpcc = 1; |
| 1242 | new_pipe->update_flags.bits.dppclk = 1; |
| 1243 | new_pipe->update_flags.bits.hubp_interdependent = 1; |
| 1244 | new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; |
| 1245 | new_pipe->update_flags.bits.gamut_remap = 1; |
| 1246 | new_pipe->update_flags.bits.scaler = 1; |
| 1247 | new_pipe->update_flags.bits.viewport = 1; |
| 1248 | if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { |
| 1249 | new_pipe->update_flags.bits.odm = 1; |
| 1250 | new_pipe->update_flags.bits.global_sync = 1; |
| 1251 | } |
| 1252 | return; |
| 1253 | } |
| 1254 | if (old_pipe->plane_state && !new_pipe->plane_state) { |
| 1255 | new_pipe->update_flags.bits.disable = 1; |
| 1256 | return; |
| 1257 | } |
| 1258 | |
| 1259 | /* Detect plane change */ |
| 1260 | if (old_pipe->plane_state != new_pipe->plane_state) { |
| 1261 | new_pipe->update_flags.bits.plane_changed = true;
| 1262 | } |
| 1263 | |
| 1264 | /* Detect top pipe only changes */ |
| 1265 | if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { |
| 1266 | /* Detect odm changes */ |
| 1267 | if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe |
| 1268 | && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx) |
| 1269 | || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe) |
| 1270 | || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe) |
| 1271 | || old_pipe->stream_res.opp != new_pipe->stream_res.opp) |
| 1272 | new_pipe->update_flags.bits.odm = 1; |
| 1273 | |
| 1274 | /* Detect global sync changes */ |
| 1275 | if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset |
| 1276 | || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start |
| 1277 | || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset |
| 1278 | || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width) |
| 1279 | new_pipe->update_flags.bits.global_sync = 1; |
| 1280 | } |
| 1281 | |
| 1282 | /* |
| 1283 | * Detect opp / tg change, only set on change, not on enable |
| 1284 | * Assume mpcc inst = pipe index, if not this code needs to be updated |
| 1285 | * since mpcc is what is affected by these. In fact all of our sequence |
| 1286 | * makes this assumption at the moment with how hubp reset is matched to |
| 1287 | * same index mpcc reset. |
| 1288 | */ |
| 1289 | if (old_pipe->stream_res.opp != new_pipe->stream_res.opp) |
| 1290 | new_pipe->update_flags.bits.opp_changed = 1; |
| 1291 | if (old_pipe->stream_res.tg != new_pipe->stream_res.tg) |
| 1292 | new_pipe->update_flags.bits.tg_changed = 1; |
| 1293 | |
| 1294 | /* |
| 1295 | * Detect mpcc blending changes, only dpp inst and opp matter here, |
| 1296 | * mpccs getting removed/inserted update connected ones during their own |
| 1297 | * programming |
| 1298 | */ |
| 1299 | if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp |
| 1300 | || old_pipe->stream_res.opp != new_pipe->stream_res.opp) |
| 1301 | new_pipe->update_flags.bits.mpcc = 1; |
| 1302 | |
| 1303 | /* Detect dppclk change */ |
| 1304 | if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz) |
| 1305 | new_pipe->update_flags.bits.dppclk = 1; |
| 1306 | |
| 1307 | /* Check for scl update */ |
| 1308 | if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
| 1309 | new_pipe->update_flags.bits.scaler = 1; |
| 1310 | /* Check for vp update */ |
| 1311 | if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
| 1312 | || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
| 1313 | &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
| 1314 | new_pipe->update_flags.bits.viewport = 1; |
| 1315 | |
| 1316 | /* Detect dlg/ttu/rq updates */ |
| 1317 | { |
| 1318 | struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs; |
| 1319 | struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs; |
| 1320 | struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs; |
| 1321 | struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs; |
| 1322 | |
| 1323 | /* Detect pipe interdependent updates */ |
| 1324 | if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch || |
| 1325 | old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch || |
| 1326 | old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c || |
| 1327 | old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank || |
| 1328 | old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank || |
| 1329 | old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip || |
| 1330 | old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip || |
| 1331 | old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l || |
| 1332 | old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c || |
| 1333 | old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l || |
| 1334 | old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l || |
| 1335 | old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c || |
| 1336 | old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l || |
| 1337 | old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c || |
| 1338 | old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 || |
| 1339 | old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 || |
| 1340 | old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank || |
| 1341 | old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) { |
| 1342 | old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch; |
| 1343 | old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch; |
| 1344 | old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c; |
| 1345 | old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank; |
| 1346 | old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank; |
| 1347 | old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip; |
| 1348 | old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip; |
| 1349 | old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l; |
| 1350 | old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c; |
| 1351 | old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l; |
| 1352 | old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l; |
| 1353 | old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c; |
| 1354 | old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l; |
| 1355 | old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c; |
| 1356 | old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0; |
| 1357 | old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1; |
| 1358 | old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank; |
| 1359 | old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip; |
| 1360 | new_pipe->update_flags.bits.hubp_interdependent = 1; |
| 1361 | } |
| 1362 | /* Detect any other updates to ttu/rq/dlg */ |
| 1363 | if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
| 1364 | memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
| 1365 | memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
| 1366 | new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; |
| 1367 | } |
| 1368 | } |
| 1369 | |
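| | /*
| | * dcn20_update_dchubp_dpp: applies the per-pipe update_flags computed above:
| | * DPPCLK enable, HUBP dlg/ttu/rq programming, input CSC, MPCC blending,
| | * scaler and viewport, cursor re-apply, gamut remap / output CSC, surface
| | * config, and finally the plane address and HUBP unblank on enable.
| | */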
| 1370 | static void dcn20_update_dchubp_dpp( |
| 1371 | struct dc *dc, |
| 1372 | struct pipe_ctx *pipe_ctx, |
| 1373 | struct dc_state *context) |
| 1374 | { |
| 1375 | struct dce_hwseq *hws = dc->hwseq; |
| 1376 | struct hubp *hubp = pipe_ctx->plane_res.hubp; |
| 1377 | struct dpp *dpp = pipe_ctx->plane_res.dpp; |
| 1378 | struct dc_plane_state *plane_state = pipe_ctx->plane_state; |
| 1379 | bool viewport_changed = false;
| 1380 | |
| 1381 | if (pipe_ctx->update_flags.bits.dppclk) |
| 1382 | dpp->funcs->dpp_dppclk_control(dpp, false, true);
| 1383 | |
| 1384 | /* TODO: Need an input parameter to tell the current DCHUB pipe which OTG it ties to.
| 1385 | * VTG is within DCHUBBUB, which is a common block shared by each pipe's HUBP.
| 1386 | * VTG has a 1:1 mapping with OTG. Each pipe's HUBP selects which VTG to use.
| 1387 | */
| 1388 | if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) { |
| 1389 | hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst); |
| 1390 | |
| 1391 | hubp->funcs->hubp_setup( |
| 1392 | hubp, |
| 1393 | &pipe_ctx->dlg_regs, |
| 1394 | &pipe_ctx->ttu_regs, |
| 1395 | &pipe_ctx->rq_regs, |
| 1396 | &pipe_ctx->pipe_dlg_param); |
| 1397 | } |
| 1398 | if (pipe_ctx->update_flags.bits.hubp_interdependent) |
| 1399 | hubp->funcs->hubp_setup_interdependent( |
| 1400 | hubp, |
| 1401 | &pipe_ctx->dlg_regs, |
| 1402 | &pipe_ctx->ttu_regs); |
| 1403 | |
| 1404 | if (pipe_ctx->update_flags.bits.enable || |
| 1405 | pipe_ctx->update_flags.bits.plane_changed || |
| 1406 | plane_state->update_flags.bits.bpp_change || |
| 1407 | plane_state->update_flags.bits.input_csc_change || |
| 1408 | plane_state->update_flags.bits.color_space_change || |
| 1409 | plane_state->update_flags.bits.coeff_reduction_change) { |
| 1410 | struct dc_bias_and_scale bns_params = {0}; |
| 1411 | |
| 1412 | // program the input csc |
| 1413 | dpp->funcs->dpp_setup(dpp, |
| 1414 | plane_state->format, |
| 1415 | EXPANSION_MODE_ZERO, |
| 1416 | plane_state->input_csc_color_matrix, |
| 1417 | plane_state->color_space, |
| 1418 | NULL);
| 1419 | |
| 1420 | if (dpp->funcs->dpp_program_bias_and_scale) { |
| 1421 | //TODO :for CNVC set scale and bias registers if necessary |
| 1422 | build_prescale_params(&bns_params, plane_state); |
| 1423 | dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); |
| 1424 | } |
| 1425 | } |
| 1426 | |
| 1427 | if (pipe_ctx->update_flags.bits.mpcc |
| 1428 | || pipe_ctx->update_flags.bits.plane_changed |
| 1429 | || plane_state->update_flags.bits.global_alpha_change |
| 1430 | || plane_state->update_flags.bits.per_pixel_alpha_change) { |
| 1431 | // MPCC inst is equal to pipe index in practice |
| 1432 | int mpcc_inst = hubp->inst; |
| 1433 | int opp_inst; |
| 1434 | int opp_count = dc->res_pool->pipe_count; |
| 1435 | |
| 1436 | for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { |
| 1437 | if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { |
| 1438 | dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); |
| 1439 | dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
| 1440 | break; |
| 1441 | } |
| 1442 | } |
| 1443 | hws->funcs.update_mpcc(dc, pipe_ctx); |
| 1444 | } |
| 1445 | |
| 1446 | if (pipe_ctx->update_flags.bits.scaler || |
| 1447 | plane_state->update_flags.bits.scaling_change || |
| 1448 | plane_state->update_flags.bits.position_change || |
| 1449 | plane_state->update_flags.bits.per_pixel_alpha_change || |
| 1450 | pipe_ctx->stream->update_flags.bits.scaling) { |
| 1451 | pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; |
| 1452 | ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
| 1453 | /* scaler configuration */ |
| 1454 | pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler( |
| 1455 | pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); |
| 1456 | } |
| 1457 | |
| 1458 | if (pipe_ctx->update_flags.bits.viewport || |
| 1459 | (context == dc->current_state && plane_state->update_flags.bits.position_change) || |
| 1460 | (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || |
| 1461 | (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { |
| 1462 | |
| 1463 | hubp->funcs->mem_program_viewport( |
| 1464 | hubp, |
| 1465 | &pipe_ctx->plane_res.scl_data.viewport, |
| 1466 | &pipe_ctx->plane_res.scl_data.viewport_c); |
| 1467 | viewport_changed = true;
| 1468 | } |
| 1469 | |
| 1470 | /* Any updates are handled in dc interface, just need to apply existing for plane enable */ |
| 1471 | if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || |
| 1472 | pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
| 1473 | pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { |
| 1474 | dc->hwss.set_cursor_position(pipe_ctx); |
| 1475 | dc->hwss.set_cursor_attribute(pipe_ctx); |
| 1476 | |
| 1477 | if (dc->hwss.set_cursor_sdr_white_level) |
| 1478 | dc->hwss.set_cursor_sdr_white_level(pipe_ctx); |
| 1479 | } |
| 1480 | |
| 1481 | /* Any updates are handled in dc interface, just need |
| 1482 | * to apply existing for plane enable / opp change */ |
| 1483 | if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed |
| 1484 | || pipe_ctx->stream->update_flags.bits.gamut_remap |
| 1485 | || pipe_ctx->stream->update_flags.bits.out_csc) { |
| 1486 | #if defined(CONFIG_DRM_AMD_DC_DCN3_01) |
| 1487 | struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; |
| 1488 | |
| 1489 | if (mpc->funcs->set_gamut_remap) { |
| 1490 | int i; |
| 1491 | int mpcc_id = hubp->inst; |
| 1492 | struct mpc_grph_gamut_adjustment adjust; |
| 1493 | bool enable_remap_dpp = false;
| 1494 | 
| 1495 | memset(&adjust, 0, sizeof(adjust));
| 1496 | adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; |
| 1497 | |
| 1498 | /* save the enablement of gamut remap for dpp */ |
| 1499 | enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap; |
| 1500 | |
| 1501 | /* force bypass gamut remap for dpp/cm */ |
| 1502 | pipe_ctx->stream->gamut_remap_matrix.enable_remap = false;
| 1503 | dc->hwss.program_gamut_remap(pipe_ctx); |
| 1504 | |
| 1505 | /* restore gamut remap flag and use this remap into mpc */ |
| 1506 | pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp; |
| 1507 | |
| 1508 | /* build remap matrix for top plane if enabled */ |
| 1509 | if (enable_remap_dpp && pipe_ctx->top_pipe == NULL) {
| 1510 | adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; |
| 1511 | for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
| 1512 | adjust.temperature_matrix[i] = |
| 1513 | pipe_ctx->stream->gamut_remap_matrix.matrix[i]; |
| 1514 | } |
| 1515 | mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust); |
| 1516 | } else |
| 1517 | #endif |
| 1518 | /* dpp/cm gamut remap*/ |
| 1519 | dc->hwss.program_gamut_remap(pipe_ctx); |
| 1520 | |
| 1521 | /*call the dcn2 method which uses mpc csc*/ |
| 1522 | dc->hwss.program_output_csc(dc, |
| 1523 | pipe_ctx, |
| 1524 | pipe_ctx->stream->output_color_space, |
| 1525 | pipe_ctx->stream->csc_color_matrix.matrix, |
| 1526 | hubp->opp_id); |
| 1527 | } |
| 1528 | |
| 1529 | if (pipe_ctx->update_flags.bits.enable || |
| 1530 | pipe_ctx->update_flags.bits.plane_changed || |
| 1531 | pipe_ctx->update_flags.bits.opp_changed || |
| 1532 | plane_state->update_flags.bits.pixel_format_change || |
| 1533 | plane_state->update_flags.bits.horizontal_mirror_change || |
| 1534 | plane_state->update_flags.bits.rotation_change || |
| 1535 | plane_state->update_flags.bits.swizzle_change || |
| 1536 | plane_state->update_flags.bits.dcc_change || |
| 1537 | plane_state->update_flags.bits.bpp_change || |
| 1538 | plane_state->update_flags.bits.scaling_change || |
| 1539 | plane_state->update_flags.bits.plane_size_change) { |
| 1540 | struct plane_size size = plane_state->plane_size; |
| 1541 | |
| 1542 | size.surface_size = pipe_ctx->plane_res.scl_data.viewport; |
| 1543 | hubp->funcs->hubp_program_surface_config( |
| 1544 | hubp, |
| 1545 | plane_state->format, |
| 1546 | &plane_state->tiling_info, |
| 1547 | &size, |
| 1548 | plane_state->rotation, |
| 1549 | &plane_state->dcc, |
| 1550 | plane_state->horizontal_mirror, |
| 1551 | 0); |
| 1552 | hubp->power_gated = false;
| 1553 | } |
| 1554 | |
| 1555 | if (pipe_ctx->update_flags.bits.enable || |
| 1556 | pipe_ctx->update_flags.bits.plane_changed || |
| 1557 | plane_state->update_flags.bits.addr_update) |
| 1558 | hws->funcs.update_plane_addr(dc, pipe_ctx); |
| 1559 | |
| 1560 | |
| 1561 | |
| 1562 | if (pipe_ctx->update_flags.bits.enable) |
| 1563 | hubp->funcs->set_blank(hubp, false);
| 1564 | } |
| 1565 | |
| 1566 | |
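| | /*
| | * dcn20_program_pipe: per-pipe programming driven by update_flags: unblank
| | * on the top pipe, global sync and VTG parameters, ODM update, plane enable,
| | * HUBP/DPP updates, HDR multiplier, input/output transfer functions, and
| | * dynamic expansion / FMT when the pipe is enabled or its OPP changed.
| | */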
| 1567 | static void dcn20_program_pipe( |
| 1568 | struct dc *dc, |
| 1569 | struct pipe_ctx *pipe_ctx, |
| 1570 | struct dc_state *context) |
| 1571 | { |
| 1572 | struct dce_hwseq *hws = dc->hwseq; |
| 1573 | /* Only need to unblank on top pipe */ |
| 1574 | if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level) |
| 1575 | && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) |
| 1576 | hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); |
| 1577 | |
| 1578 | if (pipe_ctx->update_flags.bits.global_sync) { |
| 1579 | pipe_ctx->stream_res.tg->funcs->program_global_sync( |
| 1580 | pipe_ctx->stream_res.tg, |
| 1581 | pipe_ctx->pipe_dlg_param.vready_offset, |
| 1582 | pipe_ctx->pipe_dlg_param.vstartup_start, |
| 1583 | pipe_ctx->pipe_dlg_param.vupdate_offset, |
| 1584 | pipe_ctx->pipe_dlg_param.vupdate_width); |
| 1585 | |
| 1586 | pipe_ctx->stream_res.tg->funcs->set_vtg_params( |
| 1587 | pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); |
| 1588 | |
| 1589 | if (hws->funcs.setup_vupdate_interrupt) |
| 1590 | hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); |
| 1591 | } |
| 1592 | |
| 1593 | if (pipe_ctx->update_flags.bits.odm) |
| 1594 | hws->funcs.update_odm(dc, context, pipe_ctx); |
| 1595 | |
| 1596 | if (pipe_ctx->update_flags.bits.enable) { |
| 1597 | dcn20_enable_plane(dc, pipe_ctx, context); |
| 1598 | if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes) |
| 1599 | dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub); |
| 1600 | } |
| 1601 | |
| 1602 | if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) |
| 1603 | dcn20_update_dchubp_dpp(dc, pipe_ctx, context); |
| 1604 | |
| 1605 | if (pipe_ctx->update_flags.bits.enable |
| 1606 | || pipe_ctx->plane_state->update_flags.bits.hdr_mult) |
| 1607 | hws->funcs.set_hdr_multiplier(pipe_ctx); |
| 1608 | |
| 1609 | if (pipe_ctx->update_flags.bits.enable || |
| 1610 | pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || |
| 1611 | pipe_ctx->plane_state->update_flags.bits.gamma_change) |
| 1612 | hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); |
| 1613 | |
| 1614 | /* dcn10_translate_regamma_to_hw_format takes 750us to finish |
| 1615 | * only do gamma programming for powering on, internal memcmp to avoid |
| 1616 | * updating on slave planes |
| 1617 | */ |
| 1618 | if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf) |
| 1619 | hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); |
| 1620 | |
| 1621 | /* If the pipe has been enabled or has a different opp, we |
| 1622 | * should reprogram the fmt. This deals with cases where |
| 1623 | * interaction between mpc and odm combine on different streams
| 1624 | * causes a different pipe to be chosen to odm combine with. |
| 1625 | */ |
| 1626 | if (pipe_ctx->update_flags.bits.enable |
| 1627 | || pipe_ctx->update_flags.bits.opp_changed) { |
| 1628 | |
| 1629 | pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( |
| 1630 | pipe_ctx->stream_res.opp, |
| 1631 | COLOR_SPACE_YCBCR601, |
| 1632 | pipe_ctx->stream->timing.display_color_depth, |
| 1633 | pipe_ctx->stream->signal); |
| 1634 | |
| 1635 | pipe_ctx->stream_res.opp->funcs->opp_program_fmt( |
| 1636 | pipe_ctx->stream_res.opp, |
| 1637 | &pipe_ctx->stream->bit_depth_params, |
| 1638 | &pipe_ctx->stream->clamping); |
| 1639 | } |
| 1640 | } |
| 1641 | |
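| | /*
| | * dcn20_program_front_end_for_ctx: full front-end apply for a new state:
| | * carry over GSL groups, turn off triple buffering for full updates, detect
| | * per-pipe changes, blank OTGs and disconnect MPCCs for pipes being disabled,
| | * then program the remaining pipes top-down so MPCC trees are built in order,
| | * including any writeback pipes.
| | */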
| 1642 | void dcn20_program_front_end_for_ctx( |
| 1643 | struct dc *dc, |
| 1644 | struct dc_state *context) |
| 1645 | { |
| 1646 | int i; |
| 1647 | struct dce_hwseq *hws = dc->hwseq; |
| 1648 | DC_LOGGER_INIT(dc->ctx->logger); |
| 1649 | |
| 1650 | /* Carry over GSL groups in case the context is changing. */ |
| 1651 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 1652 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
| 1653 | struct pipe_ctx *old_pipe_ctx = |
| 1654 | &dc->current_state->res_ctx.pipe_ctx[i]; |
| 1655 | |
| 1656 | if (pipe_ctx->stream == old_pipe_ctx->stream) |
| 1657 | pipe_ctx->stream_res.gsl_group = |
| 1658 | old_pipe_ctx->stream_res.gsl_group; |
| 1659 | } |
| 1660 | |
| 1661 | if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
| 1662 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 1663 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
| 1664 | |
| 1665 | if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) { |
| 1666 | ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
| 1667 | /*turn off triple buffer for full update*/ |
| 1668 | dc->hwss.program_triplebuffer( |
| 1669 | dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); |
| 1670 | } |
| 1671 | } |
| 1672 | } |
| 1673 | |
| 1674 | /* Set pipe update flags and lock pipes */ |
| 1675 | for (i = 0; i < dc->res_pool->pipe_count; i++) |
| 1676 | dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], |
| 1677 | &context->res_ctx.pipe_ctx[i]); |
| 1678 | |
| 1679 | /* OTG blank before disabling all front ends */ |
| 1680 | for (i = 0; i < dc->res_pool->pipe_count; i++) |
| 1681 | if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable |
| 1682 | && !context->res_ctx.pipe_ctx[i].top_pipe |
| 1683 | && !context->res_ctx.pipe_ctx[i].prev_odm_pipe |
| 1684 | && context->res_ctx.pipe_ctx[i].stream) |
| 1685 | hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
| 1686 | |
| 1687 | /* Disconnect mpcc */ |
| 1688 | for (i = 0; i < dc->res_pool->pipe_count; i++) |
| 1689 | if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable |
| 1690 | || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { |
| 1691 | hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); |
| 1692 | DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
| 1693 | } |
| 1694 | |
| 1695 | /* |
| 1696 | * Program all updated pipes, order matters for mpcc setup. Start with |
| 1697 | * top pipe and program all pipes that follow in order |
| 1698 | */ |
| 1699 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 1700 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
| 1701 | |
| 1702 | if (pipe->plane_state && !pipe->top_pipe) { |
| 1703 | while (pipe) { |
| 1704 | dcn20_program_pipe(dc, pipe, context); |
| 1705 | pipe = pipe->bottom_pipe; |
| 1706 | } |
| 1707 | } |
| 1708 | /* Program secondary blending tree and writeback pipes */ |
| 1709 | pipe = &context->res_ctx.pipe_ctx[i]; |
| 1710 | if (!pipe->top_pipe && !pipe->prev_odm_pipe |
| 1711 | && pipe->stream && pipe->stream->num_wb_info > 0 |
| 1712 | && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw) |
| 1713 | || pipe->stream->update_flags.raw) |
| 1714 | && hws->funcs.program_all_writeback_pipes_in_tree) |
| 1715 | hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); |
| 1716 | } |
| 1717 | } |
| 1718 | |
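| | /*
| | * dcn20_post_unlock_program_front_end: runs after the pipes are unlocked:
| | * disables planes for pipes marked disable, waits for pending flips on newly
| | * enabled pipes (an immediate flip during enable is unsupported on DCN), and
| | * applies the DEGVIDCN21 and MPO-transition self-refresh workarounds.
| | */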
| 1719 | void dcn20_post_unlock_program_front_end( |
| 1720 | struct dc *dc, |
| 1721 | struct dc_state *context) |
| 1722 | { |
| 1723 | int i; |
| 1724 | const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; |
| 1725 | struct dce_hwseq *hwseq = dc->hwseq; |
| 1726 | |
| 1727 | DC_LOGGER_INIT(dc->ctx->logger); |
| 1728 | |
| 1729 | for (i = 0; i < dc->res_pool->pipe_count; i++) |
| 1730 | if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) |
| 1731 | dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); |
| 1732 | |
| 1733 | /* |
| 1734 | * If we are enabling a pipe, we need to wait for pending clear as this is a critical |
| 1735 | * part of the enable operation otherwise, DM may request an immediate flip which |
| 1736 | * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which |
| 1737 | * is unsupported on DCN. |
| 1738 | */ |
| 1739 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 1740 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
| 1741 | |
| 1742 | if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) { |
| 1743 | struct hubp *hubp = pipe->plane_res.hubp; |
| 1744 | int j = 0; |
| 1745 | |
| 1746 | for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000 |
| 1747 | && hubp->funcs->hubp_is_flip_pending(hubp); j++) |
| 1748 | mdelay(1); |
| 1749 | } |
| 1750 | } |
| 1751 | |
| 1752 | /* WA to apply WM setting*/ |
| 1753 | if (hwseq->wa.DEGVIDCN21) |
| 1754 | dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub); |
| 1755 | |
| 1756 | |
| 1757 | /* WA for stutter underflow during MPO transitions when adding 2nd plane */ |
| 1758 | if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) { |
| 1759 | |
| 1760 | if (dc->current_state->stream_status[0].plane_count == 1 && |
| 1761 | context->stream_status[0].plane_count > 1) { |
| 1762 | |
| 1763 | struct timing_generator *tg = dc->res_pool->timing_generators[0]; |
| 1764 | |
| 1765 | dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
| 1766 | |
| 1767 | hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
| 1768 | hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg); |
| 1769 | } |
| 1770 | } |
| 1771 | } |
| 1772 | |
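| | /*
| | * dcn20_prepare_bandwidth / dcn20_optimize_bandwidth: both update clocks and
| | * DCHUBBUB watermarks; prepare passes false for the "safe to lower" argument
| | * so values are only raised, while optimize passes true so clocks and
| | * watermarks may be lowered once the new state is active.
| | */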
| 1773 | void dcn20_prepare_bandwidth( |
| 1774 | struct dc *dc, |
| 1775 | struct dc_state *context) |
| 1776 | { |
| 1777 | struct hubbub *hubbub = dc->res_pool->hubbub; |
| 1778 | |
| 1779 | dc->clk_mgr->funcs->update_clocks( |
| 1780 | dc->clk_mgr, |
| 1781 | context, |
| 1782 | false);
| 1783 | |
| 1784 | /* program dchubbub watermarks */ |
| 1785 | dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, |
| 1786 | &context->bw_ctx.bw.dcn.watermarks, |
| 1787 | dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, |
| 1788 | false);
| 1789 | } |
| 1790 | |
| 1791 | void dcn20_optimize_bandwidth( |
| 1792 | struct dc *dc, |
| 1793 | struct dc_state *context) |
| 1794 | { |
| 1795 | struct hubbub *hubbub = dc->res_pool->hubbub; |
| 1796 | |
| 1797 | /* program dchubbub watermarks */ |
| 1798 | hubbub->funcs->program_watermarks(hubbub, |
| 1799 | &context->bw_ctx.bw.dcn.watermarks, |
| 1800 | dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, |
| 1801 | true);
| 1802 | |
| 1803 | dc->clk_mgr->funcs->update_clocks( |
| 1804 | dc->clk_mgr, |
| 1805 | context, |
| 1806 | true);
| 1807 | } |
| 1808 | |
| 1809 | bool dcn20_update_bandwidth(
| 1810 | struct dc *dc, |
| 1811 | struct dc_state *context) |
| 1812 | { |
| 1813 | int i; |
| 1814 | struct dce_hwseq *hws = dc->hwseq; |
| 1815 | |
| 1816 | /* recalculate DML parameters */ |
| 1817 | if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
| 1818 | return false;
| 1819 | |
| 1820 | /* apply updated bandwidth parameters */ |
| 1821 | dc->hwss.prepare_bandwidth(dc, context); |
| 1822 | |
| 1823 | /* update hubp configs for all pipes */ |
| 1824 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 1825 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
| 1826 | |
| 1827 | if (pipe_ctx->plane_state == NULL)
| 1828 | continue; |
| 1829 | |
| 1830 | if (pipe_ctx->top_pipe == NULL) {
| 1831 | bool blank = !is_pipe_tree_visible(pipe_ctx);
| 1832 | |
| 1833 | pipe_ctx->stream_res.tg->funcs->program_global_sync( |
| 1834 | pipe_ctx->stream_res.tg, |
| 1835 | pipe_ctx->pipe_dlg_param.vready_offset, |
| 1836 | pipe_ctx->pipe_dlg_param.vstartup_start, |
| 1837 | pipe_ctx->pipe_dlg_param.vupdate_offset, |
| 1838 | pipe_ctx->pipe_dlg_param.vupdate_width); |
| 1839 | |
| 1840 | pipe_ctx->stream_res.tg->funcs->set_vtg_params( |
| 1841 | pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); |
| 1842 | |
| 1843 | if (pipe_ctx->prev_odm_pipe == NULL)
| 1844 | hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); |
| 1845 | |
| 1846 | if (hws->funcs.setup_vupdate_interrupt) |
| 1847 | hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); |
| 1848 | } |
| 1849 | |
| 1850 | pipe_ctx->plane_res.hubp->funcs->hubp_setup( |
| 1851 | pipe_ctx->plane_res.hubp, |
| 1852 | &pipe_ctx->dlg_regs, |
| 1853 | &pipe_ctx->ttu_regs, |
| 1854 | &pipe_ctx->rq_regs, |
| 1855 | &pipe_ctx->pipe_dlg_param); |
| 1856 | } |
| 1857 | |
| 1858 | return true;
| 1859 | } |
| 1860 | |
| 1861 | void dcn20_enable_writeback( |
| 1862 | struct dc *dc, |
| 1863 | struct dc_writeback_info *wb_info, |
| 1864 | struct dc_state *context) |
| 1865 | { |
| 1866 | struct dwbc *dwb; |
| 1867 | struct mcif_wb *mcif_wb; |
| 1868 | struct timing_generator *optc; |
| 1869 | |
| 1870 | ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
| 1871 | ASSERT(wb_info->wb_enabled);
| 1872 | dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; |
| 1873 | mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; |
| 1874 | |
| 1875 | /* set the OPTC source mux */ |
| 1876 | optc = dc->res_pool->timing_generators[dwb->otg_inst]; |
| 1877 | optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); |
| 1878 | /* set MCIF_WB buffer and arbitration configuration */ |
| 1879 | mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); |
| 1880 | mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); |
| 1881 | /* Enable MCIF_WB */ |
| 1882 | mcif_wb->funcs->enable_mcif(mcif_wb); |
| 1883 | /* Enable DWB */ |
| 1884 | dwb->funcs->enable(dwb, &wb_info->dwb_params); |
| 1885 | /* TODO: add sequence to enable/disable warmup */ |
| 1886 | } |
| 1887 | |
| 1888 | void dcn20_disable_writeback( |
| 1889 | struct dc *dc, |
| 1890 | unsigned int dwb_pipe_inst) |
| 1891 | { |
| 1892 | struct dwbc *dwb; |
| 1893 | struct mcif_wb *mcif_wb; |
| 1894 | |
| 1895 | ASSERT(dwb_pipe_inst < MAX_DWB_PIPES);
| 1896 | dwb = dc->res_pool->dwbc[dwb_pipe_inst]; |
| 1897 | mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst]; |
| 1898 | |
| 1899 | dwb->funcs->disable(dwb); |
| 1900 | mcif_wb->funcs->disable_mcif(mcif_wb); |
| 1901 | } |
| 1902 | |
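| | /*
| | * dcn20_wait_for_blank_complete: polls the OPP display pattern generator for
| | * up to 1000 iterations of 100us (about 100 ms) and returns false if the
| | * CRTC never reports blanked.
| | */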
| 1903 | bool dcn20_wait_for_blank_complete(
| 1904 | struct output_pixel_processor *opp) |
| 1905 | { |
| 1906 | int counter; |
| 1907 | |
| 1908 | for (counter = 0; counter < 1000; counter++) { |
| 1909 | if (opp->funcs->dpg_is_blanked(opp)) |
| 1910 | break; |
| 1911 | |
| 1912 | udelay(100); |
| 1913 | } |
| 1914 | |
| 1915 | if (counter == 1000) { |
| 1916 | dm_error("DC: failed to blank crtc!\n");
| 1917 | return false;
| 1918 | } |
| 1919 | |
| 1920 | return true;
| 1921 | } |
| 1922 | |
| 1923 | bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
| 1924 | { |
| 1925 | struct hubp *hubp = pipe_ctx->plane_res.hubp; |
| 1926 | |
| 1927 | if (!hubp) |
| 1928 | return false;
| 1929 | return hubp->funcs->dmdata_status_done(hubp); |
| 1930 | } |
| 1931 | |
| 1932 | void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) |
| 1933 | { |
| 1934 | struct dce_hwseq *hws = dc->hwseq; |
| 1935 | |
| 1936 | if (pipe_ctx->stream_res.dsc) { |
| 1937 | struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; |
| 1938 | |
| 1939 | hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
| 1940 | while (odm_pipe) { |
| 1941 | hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
| 1942 | odm_pipe = odm_pipe->next_odm_pipe; |
| 1943 | } |
| 1944 | } |
| 1945 | } |
| 1946 | |
| 1947 | void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) |
| 1948 | { |
| 1949 | struct dce_hwseq *hws = dc->hwseq; |
| 1950 | |
| 1951 | if (pipe_ctx->stream_res.dsc) { |
| 1952 | struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; |
| 1953 | |
| 1954 | hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
| 1955 | while (odm_pipe) { |
| 1956 | hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
| 1957 | odm_pipe = odm_pipe->next_odm_pipe; |
| 1958 | } |
| 1959 | } |
| 1960 | } |
| 1961 | |
| 1962 | void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) |
| 1963 | { |
| 1964 | struct dc_dmdata_attributes attr = { 0 }; |
| 1965 | struct hubp *hubp = pipe_ctx->plane_res.hubp; |
| 1966 | |
| 1967 | attr.dmdata_mode = DMDATA_HW_MODE; |
| 1968 | attr.dmdata_size = |
| 1969 | dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36; |
| 1970 | attr.address.quad_part = |
| 1971 | pipe_ctx->stream->dmdata_address.quad_part; |
| 1972 | attr.dmdata_dl_delta = 0; |
| 1973 | attr.dmdata_qos_mode = 0; |
| 1974 | attr.dmdata_qos_level = 0; |
| 1975 | attr.dmdata_repeat = 1; /* always repeat */ |
| 1976 | attr.dmdata_updated = 1; |
| 1977 | attr.dmdata_sw_data = NULL;
| 1978 | |
| 1979 | hubp->funcs->dmdata_set_attributes(hubp, &attr); |
| 1980 | } |
| 1981 | |
| 1982 | void dcn20_init_vm_ctx( |
| 1983 | struct dce_hwseq *hws, |
| 1984 | struct dc *dc, |
| 1985 | struct dc_virtual_addr_space_config *va_config, |
| 1986 | int vmid) |
| 1987 | { |
| 1988 | struct dcn_hubbub_virt_addr_config config; |
| 1989 | |
| 1990 | if (vmid == 0) { |
| 1991 | ASSERT(0); /* VMID cannot be 0 for vm context */
| 1992 | return; |
| 1993 | } |
| 1994 | |
| 1995 | config.page_table_start_addr = va_config->page_table_start_addr; |
| 1996 | config.page_table_end_addr = va_config->page_table_end_addr; |
| 1997 | config.page_table_block_size = va_config->page_table_block_size_in_bytes; |
| 1998 | config.page_table_depth = va_config->page_table_depth; |
| 1999 | config.page_table_base_addr = va_config->page_table_base_addr; |
| 2000 | |
| 2001 | dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid); |
| 2002 | } |
| 2003 | |
| 2004 | int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) |
| 2005 | { |
| 2006 | struct dcn_hubbub_phys_addr_config config; |
| 2007 | |
| 2008 | config.system_aperture.fb_top = pa_config->system_aperture.fb_top; |
| 2009 | config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; |
| 2010 | config.system_aperture.fb_base = pa_config->system_aperture.fb_base; |
| 2011 | config.system_aperture.agp_top = pa_config->system_aperture.agp_top; |
| 2012 | config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot; |
| 2013 | config.system_aperture.agp_base = pa_config->system_aperture.agp_base; |
| 2014 | config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr; |
| 2015 | config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr; |
| 2016 | config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; |
| 2017 | config.page_table_default_page_addr = pa_config->page_table_default_page_addr; |
| 2018 | |
| 2019 | return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config); |
| 2020 | } |
| 2021 | |
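| | /*
| | * patch_address_for_sbs_tb_stereo: for side-by-side / top-and-bottom stereo
| | * on a split secondary pipe, temporarily swaps in the right-eye address and
| | * returns true so the caller can restore the left address afterwards; it
| | * also forces a stereo address type for 3D view formats.
| | */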
| 2022 | static bool patch_address_for_sbs_tb_stereo(
| 2023 | struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
| 2024 | { |
| 2025 | struct dc_plane_state *plane_state = pipe_ctx->plane_state; |
| 2026 | bool sec_split = pipe_ctx->top_pipe &&
| 2027 | pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; |
| 2028 | if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO && |
| 2029 | (pipe_ctx->stream->timing.timing_3d_format == |
| 2030 | TIMING_3D_FORMAT_SIDE_BY_SIDE || |
| 2031 | pipe_ctx->stream->timing.timing_3d_format == |
| 2032 | TIMING_3D_FORMAT_TOP_AND_BOTTOM)) { |
| 2033 | *addr = plane_state->address.grph_stereo.left_addr; |
| 2034 | plane_state->address.grph_stereo.left_addr = |
| 2035 | plane_state->address.grph_stereo.right_addr; |
| 2036 | return true;
| 2037 | } |
| 2038 | |
| 2039 | if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE && |
| 2040 | plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) { |
| 2041 | plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO; |
| 2042 | plane_state->address.grph_stereo.right_addr = |
| 2043 | plane_state->address.grph_stereo.left_addr; |
| 2044 | } |
| 2045 | return false;
| 2046 | } |
| 2047 | |
| 2048 | void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) |
| 2049 | { |
| 2050 | bool addr_patched = false;
| 2051 | PHYSICAL_ADDRESS_LOC addr;
| 2052 | struct dc_plane_state *plane_state = pipe_ctx->plane_state; |
| 2053 | |
| 2054 | if (plane_state == NULL)
| 2055 | return; |
| 2056 | |
| 2057 | addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr); |
| 2058 | |
| 2059 | // Call Helper to track VMID use |
| 2060 | vm_helper_mark_vmid_used(dc->vm_helper, plane_state->address.vmid, pipe_ctx->plane_res.hubp->inst); |
| 2061 | |
| 2062 | pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr( |
| 2063 | pipe_ctx->plane_res.hubp, |
| 2064 | &plane_state->address, |
| 2065 | plane_state->flip_immediate); |
| 2066 | |
| 2067 | plane_state->status.requested_address = plane_state->address; |
| 2068 | |
| 2069 | if (plane_state->flip_immediate) |
| 2070 | plane_state->status.current_address = plane_state->address; |
| 2071 | |
| 2072 | if (addr_patched) |
| 2073 | pipe_ctx->plane_state->address.grph_stereo.left_addr = addr; |
| 2074 | } |
| 2075 | |
| 2076 | void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, |
| 2077 | struct dc_link_settings *link_settings) |
| 2078 | { |
| 2079 | struct encoder_unblank_param params = { { 0 } }; |
| 2080 | struct dc_stream_state *stream = pipe_ctx->stream; |
| 2081 | struct dc_link *link = stream->link; |
| 2082 | struct dce_hwseq *hws = link->dc->hwseq; |
| 2083 | struct pipe_ctx *odm_pipe; |
| 2084 | |
| 2085 | params.opp_cnt = 1; |
| 2086 | for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { |
| 2087 | params.opp_cnt++; |
| 2088 | } |
| 2089 | /* only 3 items below are used by unblank */ |
| 2090 | params.timing = pipe_ctx->stream->timing; |
| 2091 | |
| 2092 | params.link_settings.link_rate = link_settings->link_rate; |
| 2093 | |
| 2094 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) { |
| 2095 | if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) |
| 2096 | params.timing.pix_clk_100hz /= 2; |
| 2097 | pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine( |
| 2098 | pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1); |
| 2099 | pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms); |
| 2100 | } |
| 2101 | |
| 2102 | if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { |
| 2103 | hws->funcs.edp_backlight_control(link, true);
| 2104 | } |
| 2105 | } |
| 2106 | |
| 2107 | void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) |
| 2108 | { |
| 2109 | struct timing_generator *tg = pipe_ctx->stream_res.tg; |
| 2110 | int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); |
| 2111 | |
| 2112 | if (start_line < 0) |
| 2113 | start_line = 0; |
| 2114 | |
| 2115 | if (tg->funcs->setup_vertical_interrupt2) |
| 2116 | tg->funcs->setup_vertical_interrupt2(tg, start_line); |
| 2117 | } |
| 2118 | |
| 2119 | static void dcn20_reset_back_end_for_pipe( |
| 2120 | struct dc *dc, |
| 2121 | struct pipe_ctx *pipe_ctx, |
| 2122 | struct dc_state *context) |
| 2123 | { |
| 2124 | int i; |
| 2125 | struct dc_link *link; |
| 2126 | DC_LOGGER_INIT(dc->ctx->logger); |
| 2127 | if (pipe_ctx->stream_res.stream_enc == NULL) {
| 2128 | pipe_ctx->stream = NULL;
| 2129 | return; |
| 2130 | } |
| 2131 | |
| 2132 | if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
| 2133 | link = pipe_ctx->stream->link; |
| 2134 | /* DPMS may already be disabled, or the
| 2135 | * dpms_off status may be incorrect due to the fastboot
| 2136 | * feature. When the system resumes from S4 with a second
| 2137 | * screen only, dpms_off would be true but the
| 2138 | * VBIOS lit up eDP, so check the link status too.
| 2139 | */
| 2140 | if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) |
| 2141 | core_link_disable_stream(pipe_ctx); |
| 2142 | else if (pipe_ctx->stream_res.audio) |
| 2143 | dc->hwss.disable_audio_stream(pipe_ctx); |
| 2144 | |
| 2145 | /* free acquired resources */ |
| 2146 | if (pipe_ctx->stream_res.audio) { |
| 2147 | /*disable az_endpoint*/ |
| 2148 | pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); |
| 2149 | |
| 2150 | /*free audio*/ |
| 2151 | if (dc->caps.dynamic_audio == true) {
| 2152 | /*we have to dynamic arbitrate the audio endpoints*/ |
| 2153 | /*we free the resource, need reset is_audio_acquired*/ |
| 2154 | update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, |
| 2155 | pipe_ctx->stream_res.audio, false);
| 2156 | pipe_ctx->stream_res.audio = NULL;
| 2157 | } |
| 2158 | } |
| 2159 | } |
| 2160 | else if (pipe_ctx->stream_res.dsc) { |
| 2161 | dp_set_dsc_enable(pipe_ctx, false);
| 2162 | } |
| 2163 | |
| 2164 | /* By the caller's loop order, the parent pipe (pipe0) will be reset last.
| 2165 | * The back end is shared by all pipes and is disabled only when the
| 2166 | * parent pipe is disabled.
| 2167 | */
| 2168 | if (pipe_ctx->top_pipe == NULL) {
| 2169 | |
| 2170 | dc->hwss.set_abm_immediate_disable(pipe_ctx); |
| 2171 | |
| 2172 | pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); |
| 2173 | |
| 2174 | pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
| 2175 | if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) |
| 2176 | pipe_ctx->stream_res.tg->funcs->set_odm_bypass( |
| 2177 | pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); |
| 2178 | |
| 2179 | if (pipe_ctx->stream_res.tg->funcs->set_drr) |
| 2180 | pipe_ctx->stream_res.tg->funcs->set_drr( |
| 2181 | pipe_ctx->stream_res.tg, NULL);
| 2182 | } |
| 2183 | |
| 2184 | for (i = 0; i < dc->res_pool->pipe_count; i++) |
| 2185 | if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx) |
| 2186 | break; |
| 2187 | |
| 2188 | if (i == dc->res_pool->pipe_count) |
| 2189 | return; |
| 2190 | |
| 2191 | pipe_ctx->stream = NULL;
| 2192 | DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
| 2193 | pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
| 2194 | } |
| 2195 | |
| 2196 | void dcn20_reset_hw_ctx_wrap( |
| 2197 | struct dc *dc, |
| 2198 | struct dc_state *context) |
| 2199 | { |
| 2200 | int i; |
| 2201 | struct dce_hwseq *hws = dc->hwseq; |
| 2202 | |
| 2203 | /* Reset Back End*/ |
| 2204 | for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { |
| 2205 | struct pipe_ctx *pipe_ctx_old = |
| 2206 | &dc->current_state->res_ctx.pipe_ctx[i]; |
| 2207 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
| 2208 | |
| 2209 | if (!pipe_ctx_old->stream) |
| 2210 | continue; |
| 2211 | |
| 2212 | if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) |
| 2213 | continue; |
| 2214 | |
| 2215 | if (!pipe_ctx->stream || |
| 2216 | pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { |
| 2217 | struct clock_source *old_clk = pipe_ctx_old->clock_source; |
| 2218 | |
| 2219 | dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); |
| 2220 | if (hws->funcs.enable_stream_gating) |
| 2221 | hws->funcs.enable_stream_gating(dc, pipe_ctx); |
| 2222 | if (old_clk) |
| 2223 | old_clk->funcs->cs_power_down(old_clk); |
| 2224 | } |
| 2225 | } |
| 2226 | } |
| 2227 | |
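| | /*
| | * dcn20_get_mpctree_visual_confirm_color: walks up to the top pipe of the
| | * blending tree and picks a fixed debug color from pipe_colors based on the
| | * top pipe's index, used by the MPC-tree visual confirm mode.
| | */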
| 2228 | void dcn20_get_mpctree_visual_confirm_color( |
| 2229 | struct pipe_ctx *pipe_ctx, |
| 2230 | struct tg_color *color) |
| 2231 | { |
| 2232 | const struct tg_color pipe_colors[6] = { |
| 2233 | {MAX_TG_COLOR_VALUE, 0, 0}, // red
| 2234 | {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow
| 2235 | {0, MAX_TG_COLOR_VALUE, 0}, // blue
| 2236 | {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple
| 2237 | {0, 0, MAX_TG_COLOR_VALUE}, // green
| 2238 | {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange
| 2239 | }; |
| 2240 | |
| 2241 | struct pipe_ctx *top_pipe = pipe_ctx; |
| 2242 | |
| 2243 | while (top_pipe->top_pipe) { |
| 2244 | top_pipe = top_pipe->top_pipe; |
| 2245 | } |
| 2246 | |
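| | /* Every pipe blended under the same top pipe gets one color, keyed by
| |  * that top pipe's index, so all planes in an MPC tree show the same
| |  * visual-confirm color. */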
| 2247 | *color = pipe_colors[top_pipe->pipe_idx]; |
| 2248 | } |
| 2249 | |
| 2250 | void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) |
| 2251 | { |
| 2252 | struct dce_hwseq *hws = dc->hwseq; |
| 2253 | struct hubp *hubp = pipe_ctx->plane_res.hubp; |
| 2254 | struct mpcc_blnd_cfg blnd_cfg = { {0} }; |
| 2255 | bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
| 2256 | int mpcc_id; |
| 2257 | struct mpcc *new_mpcc; |
| 2258 | struct mpc *mpc = dc->res_pool->mpc; |
| 2259 | struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); |
| 2260 | |
| 2261 | // input to MPCC is always RGB, by default leave black_color at 0 |
| 2262 | if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { |
| 2263 | hws->funcs.get_hdr_visual_confirm_color( |
| 2264 | pipe_ctx, &blnd_cfg.black_color); |
| 2265 | } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { |
| 2266 | hws->funcs.get_surface_visual_confirm_color( |
| 2267 | pipe_ctx, &blnd_cfg.black_color); |
| 2268 | } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { |
| 2269 | dcn20_get_mpctree_visual_confirm_color( |
| 2270 | pipe_ctx, &blnd_cfg.black_color); |
| 2271 | } |
| 2272 | |
| 2273 | if (per_pixel_alpha) |
| 2274 | blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; |
| 2275 | else |
| 2276 | blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; |
| 2277 | |
| 2278 | blnd_cfg.overlap_only = false;
| 2279 | blnd_cfg.global_gain = 0xff; |
| 2280 | |
| 2281 | if (pipe_ctx->plane_state->global_alpha) |
| 2282 | blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; |
| 2283 | else |
| 2284 | blnd_cfg.global_alpha = 0xff; |
| 2285 | |
| 2286 | blnd_cfg.background_color_bpc = 4; |
| 2287 | blnd_cfg.bottom_gain_mode = 0; |
| 2288 | blnd_cfg.top_gain = 0x1f000; |
| 2289 | blnd_cfg.bottom_inside_gain = 0x1f000; |
| 2290 | blnd_cfg.bottom_outside_gain = 0x1f000; |
| 2291 | blnd_cfg.pre_multiplied_alpha = per_pixel_alpha; |
| 2292 | #if defined(CONFIG_DRM_AMD_DC_DCN3_01) |
| 2293 | if (pipe_ctx->plane_state->format |
| 2294 | == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA) |
| 2295 | blnd_cfg.pre_multiplied_alpha = false;
| 2296 | #endif |
| 2297 | |
| 2298 | /* |
| 2299 | * TODO: remove hack |
| 2300 | * Note: currently there is a bug in init_hw such that |
| 2301 | * on resume from hibernate, BIOS sets up MPCC0, and |
| 2302 | * we do mpcc_remove but the mpcc cannot go to idle |
| 2303 | * after remove. This causes us to pick mpcc1 here,
| 2304 | * which causes a pstate hang for a yet unknown reason.
| 2305 | */ |
| 2306 | mpcc_id = hubp->inst; |
| 2307 | |
| 2308 | /* If there is no full update, we don't need to touch the MPC tree */
| 2309 | if (!pipe_ctx->plane_state->update_flags.bits.full_update && |
| 2310 | !pipe_ctx->update_flags.bits.mpcc) { |
| 2311 | mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); |
| 2312 | return; |
| 2313 | } |
| 2314 | |
| 2315 | /* check if this MPCC is already being used */ |
| 2316 | new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); |
| 2317 | /* remove MPCC if being used */ |
| 2318 | if (new_mpcc != NULL)
| 2319 | mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc); |
| 2320 | else |
| 2321 | if (dc->debug.sanity_checks) |
| 2322 | mpc->funcs->assert_mpcc_idle_before_connect( |
| 2323 | dc->res_pool->mpc, mpcc_id); |
| 2324 | |
| 2325 | /* Call MPC to insert new plane */ |
| 2326 | new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc, |
| 2327 | mpc_tree_params, |
| 2328 | &blnd_cfg, |
| 2329 | NULL,
| 2330 | NULL,
| 2331 | hubp->inst, |
| 2332 | mpcc_id); |
| 2333 | |
| 2334 | ASSERT(new_mpcc != NULL);
| 2335 | hubp->opp_id = pipe_ctx->stream_res.opp->inst; |
| 2336 | hubp->mpcc_id = mpcc_id; |
| 2337 | } |
| 2338 | |
| 2339 | void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) |
| 2340 | { |
| 2341 | enum dc_lane_count lane_count = |
| 2342 | pipe_ctx->stream->link->cur_link_settings.lane_count; |
| 2343 | |
| 2344 | struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; |
| 2345 | struct dc_link *link = pipe_ctx->stream->link; |
| 2346 | |
| 2347 | uint32_t active_total_with_borders; |
| 2348 | uint32_t early_control = 0; |
| 2349 | struct timing_generator *tg = pipe_ctx->stream_res.tg; |
| 2350 | |
| 2351 | /* For MST, multiple streams go to only one link.
| 2352 | * Connect the DIG back end to the front end in enable_stream and
| 2353 | * disconnect them during disable_stream.
| 2354 | * This keeps the separation of stream and link logically clean.
| 2355 | */
| 2356 | link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
| 2357 | pipe_ctx->stream_res.stream_enc->id, true);
| 2358 | |
| 2359 | if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { |
| 2360 | if (link->dc->hwss.program_dmdata_engine) |
| 2361 | link->dc->hwss.program_dmdata_engine(pipe_ctx); |
| 2362 | } |
| 2363 | |
| 2364 | link->dc->hwss.update_info_frame(pipe_ctx); |
| 2365 | |
| 2366 | /* enable early control to avoid corruption on DP monitor */
| 2367 | active_total_with_borders = |
| 2368 | timing->h_addressable |
| 2369 | + timing->h_border_left |
| 2370 | + timing->h_border_right; |
| 2371 | |
| 2372 | if (lane_count != 0) |
| 2373 | early_control = active_total_with_borders % lane_count; |
| 2374 | |
| 2375 | if (early_control == 0) |
| 2376 | early_control = lane_count; |
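| | /* e.g. with 1920 addressable pixels, no borders and a 4-lane link:
| |  * 1920 % 4 == 0, so early_control falls back to the lane count (4). */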
| 2377 | |
| 2378 | tg->funcs->set_early_control(tg, early_control); |
| 2379 | |
| 2380 | /* enable audio only within mode set */ |
| 2381 | if (pipe_ctx->stream_res.audio != NULL) {
| 2382 | if (dc_is_dp_signal(pipe_ctx->stream->signal)) |
| 2383 | pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); |
| 2384 | } |
| 2385 | } |
| 2386 | |
| 2387 | void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) |
| 2388 | { |
| 2389 | struct dc_stream_state *stream = pipe_ctx->stream; |
| 2390 | struct hubp *hubp = pipe_ctx->plane_res.hubp; |
| 2391 | bool enable = false;
| 2392 | struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; |
| 2393 | enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal) |
| 2394 | ? dmdata_dp |
| 2395 | : dmdata_hdmi; |
| 2396 | |
| 2397 | /* if using dynamic meta, don't set up generic infopackets */ |
| 2398 | if (pipe_ctx->stream->dmdata_address.quad_part != 0) { |
| 2399 | pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
| 2400 | enable = true;
| 2401 | } |
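| | /* A non-zero dmdata_address means the stream carries dynamic metadata,
| |  * so the generic HDR metadata info packet is marked invalid and the
| |  * dynamic metadata path is enabled instead. */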
| 2402 | |
| 2403 | if (!hubp) |
| 2404 | return; |
| 2405 | |
| 2406 | if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata) |
| 2407 | return; |
| 2408 | |
| 2409 | stream_enc->funcs->set_dynamic_metadata(stream_enc, enable, |
| 2410 | hubp->inst, mode); |
| 2411 | } |
| 2412 | |
| 2413 | void dcn20_fpga_init_hw(struct dc *dc) |
| 2414 | { |
| 2415 | int i, j; |
| 2416 | struct dce_hwseq *hws = dc->hwseq; |
| 2417 | struct resource_pool *res_pool = dc->res_pool; |
| 2418 | struct dc_state *context = dc->current_state; |
| 2419 | |
| 2420 | if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) |
| 2421 | dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); |
| 2422 | |
| 2423 | // Initialize the dccg |
| 2424 | if (res_pool->dccg->funcs->dccg_init) |
| 2425 | res_pool->dccg->funcs->dccg_init(res_pool->dccg); |
| 2426 | |
| 2427 | // Enable ability to power gate / don't force power on permanently
| 2428 | hws->funcs.enable_power_gating_plane(hws, true);
| 2429 | |
| 2430 | // Specific to FPGA dccg and registers |
| 2431 | REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
| 2432 | REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
| 2433 | |
| 2434 | hws->funcs.dccg_init(hws); |
| 2435 | |
| 2436 | REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
| 2437 | REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
| 2438 | if (REG(REFCLK_CNTL))
| 2439 | REG_WRITE(REFCLK_CNTL, 0);
| 2440 | // |
| 2441 | |
| 2442 | |
| 2443 | /* Blank pixel data with OPP DPG */ |
| 2444 | for (i = 0; i < dc->res_pool->timing_generator_count; i++) { |
| 2445 | struct timing_generator *tg = dc->res_pool->timing_generators[i]; |
| 2446 | |
| 2447 | if (tg->funcs->is_tg_enabled(tg)) |
| 2448 | dcn20_init_blank(dc, tg); |
| 2449 | } |
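| | /* Any OTG left running (e.g. by firmware or a previous driver) is
| |  * blanked through the OPP DPG before its pipes are torn down below. */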
| 2450 | |
| 2451 | for (i = 0; i < res_pool->timing_generator_count; i++) { |
| 2452 | struct timing_generator *tg = dc->res_pool->timing_generators[i]; |
| 2453 | |
| 2454 | if (tg->funcs->is_tg_enabled(tg)) |
| 2455 | tg->funcs->lock(tg); |
| 2456 | } |
| 2457 | |
| 2458 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 2459 | struct dpp *dpp = res_pool->dpps[i]; |
| 2460 | |
| 2461 | dpp->funcs->dpp_reset(dpp); |
| 2462 | } |
| 2463 | |
| 2464 | /* Reset all MPCC muxes */ |
| 2465 | res_pool->mpc->funcs->mpc_init(res_pool->mpc); |
| 2466 | |
| 2467 | /* initialize OPP mpc_tree parameter */ |
| 2468 | for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { |
| 2469 | res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst; |
| 2470 | res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
| 2471 | for (j = 0; j < MAX_PIPES; j++)
| 2472 | res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
| 2473 | } |
| 2474 | |
| 2475 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 2476 | struct timing_generator *tg = dc->res_pool->timing_generators[i]; |
| 2477 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
| 2478 | struct hubp *hubp = dc->res_pool->hubps[i]; |
| 2479 | struct dpp *dpp = dc->res_pool->dpps[i]; |
| 2480 | |
| 2481 | pipe_ctx->stream_res.tg = tg; |
| 2482 | pipe_ctx->pipe_idx = i; |
| 2483 | |
| 2484 | pipe_ctx->plane_res.hubp = hubp; |
| 2485 | pipe_ctx->plane_res.dpp = dpp; |
| 2486 | pipe_ctx->plane_res.mpcc_inst = dpp->inst; |
| 2487 | hubp->mpcc_id = dpp->inst; |
| 2488 | hubp->opp_id = OPP_ID_INVALID;
| 2489 | hubp->power_gated = false;
| 2490 | pipe_ctx->stream_res.opp = NULL;
| 2491 | |
| 2492 | hubp->funcs->hubp_init(hubp); |
| 2493 | |
| 2494 | //dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; |
| 2495 | //dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; |
| 2496 | dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
| 2497 | pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; |
| 2498 | /* to do */
| 2499 | hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); |
| 2500 | } |
| 2501 | |
| 2502 | /* initialize DWB pointer to MCIF_WB */ |
| 2503 | for (i = 0; i < res_pool->res_cap->num_dwb; i++) |
| 2504 | res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i]; |
| 2505 | |
| 2506 | for (i = 0; i < dc->res_pool->timing_generator_count; i++) { |
| 2507 | struct timing_generator *tg = dc->res_pool->timing_generators[i]; |
| 2508 | |
| 2509 | if (tg->funcs->is_tg_enabled(tg)) |
| 2510 | tg->funcs->unlock(tg); |
| 2511 | } |
| 2512 | |
| 2513 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
| 2514 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; |
| 2515 | |
| 2516 | dc->hwss.disable_plane(dc, pipe_ctx); |
| 2517 | |
| 2518 | pipe_ctx->stream_res.tg = NULL;
| 2519 | pipe_ctx->plane_res.hubp = NULL;
| 2520 | } |
| 2521 | |
| 2522 | for (i = 0; i < dc->res_pool->timing_generator_count; i++) { |
| 2523 | struct timing_generator *tg = dc->res_pool->timing_generators[i]; |
| 2524 | |
| 2525 | tg->funcs->tg_init(tg); |
| 2526 | } |
| 2527 | } |
| 2528 | #ifndef TRIM_FSFT |
| 2529 | bool dcn20_optimize_timing_for_fsft(struct dc *dc,
| 2530 | struct dc_crtc_timing *timing, |
| 2531 | unsigned int max_input_rate_in_khz) |
| 2532 | { |
| 2533 | unsigned int old_v_front_porch; |
| 2534 | unsigned int old_v_total; |
| 2535 | unsigned int max_input_rate_in_100hz; |
| 2536 | unsigned long long new_v_total; |
| 2537 | |
| 2538 | max_input_rate_in_100hz = max_input_rate_in_khz * 10; |
| 2539 | if (max_input_rate_in_100hz < timing->pix_clk_100hz) |
| 2540 | return false;
| 2541 | |
| 2542 | old_v_total = timing->v_total; |
| 2543 | old_v_front_porch = timing->v_front_porch; |
| 2544 | |
| 2545 | timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz; |
| 2546 | timing->pix_clk_100hz = max_input_rate_in_100hz; |
| 2547 | |
| 2548 | new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->pix_clk_100hz); |
| 2549 | |
| 2550 | timing->v_total = new_v_total; |
| 2551 | timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total); |
| 2552 | return true;
| 2553 | } |
| 2554 | #endif |