| File: | dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c |
| Warning: | line 843, column 7: Branch condition evaluates to a garbage value |
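Note on the report: the flagged branch is "if (computed_streams[i])" in compute_mst_dsc_configs_for_state() (source line 843). computed_streams[] is an automatic array that is only written by the loop at line 826; when the analyzer cannot prove that loop covered the index read at line 843, it treats the branch condition as reading an uninitialized (garbage) stack value. Whether this is reachable in practice depends on invariants the analyzer does not track, e.g. stream_count never exceeding MAX_PIPES.

A minimal sketch of the flagged pattern, using hypothetical names (count_unhandled, handled, MAX_SLOTS) rather than the driver code itself:

    /* Sketch only: "count_unhandled", "handled" and MAX_SLOTS are hypothetical
     * stand-ins, not identifiers from amdgpu_dm_mst_types.c. */
    #include <stdbool.h>

    #define MAX_SLOTS 6                       /* stands in for MAX_PIPES */

    int count_unhandled(int stream_count)
    {
            bool handled[MAX_SLOTS];          /* uninitialized automatic storage */
            int i, unhandled = 0;

            for (i = 0; i < stream_count; i++)
                    handled[i] = false;       /* the only writes to handled[] */

            for (i = 0; i < stream_count; i++) {
                    if (handled[i])           /* branch condition: flagged when the
                                               * analyzer cannot prove the first
                                               * loop initialized handled[i] */
                            continue;
                    unhandled++;
            }

            return unhandled;
    }

A common way to silence this class of report, assuming it matches the intent here, is to initialize the whole array up front (e.g. memset(handled, 0, sizeof(handled))) instead of relying on the first loop, or to bound both loops by the array size.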
| 1 | /* | |||
| 2 | * Copyright 2012-15 Advanced Micro Devices, Inc. | |||
| 3 | * | |||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | |||
| 5 | * copy of this software and associated documentation files (the "Software"), | |||
| 6 | * to deal in the Software without restriction, including without limitation | |||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | |||
| 9 | * Software is furnished to do so, subject to the following conditions: | |||
| 10 | * | |||
| 11 | * The above copyright notice and this permission notice shall be included in | |||
| 12 | * all copies or substantial portions of the Software. | |||
| 13 | * | |||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | |||
| 21 | * | |||
| 22 | * Authors: AMD | |||
| 23 | * | |||
| 24 | */ | |||
| 25 | ||||
| 26 | #include <linux/version.h> | |||
| 27 | #include <drm/drm_atomic_helper.h> | |||
| 28 | #include <drm/drm_dp_mst_helper.h> | |||
| 29 | #include <drm/drm_dp_helper.h> | |||
| 30 | #include "dm_services.h" | |||
| 31 | #include "amdgpu.h" | |||
| 32 | #include "amdgpu_dm.h" | |||
| 33 | #include "amdgpu_dm_mst_types.h" | |||
| 34 | ||||
| 35 | #include "dc.h" | |||
| 36 | #include "dm_helpers.h" | |||
| 37 | ||||
| 38 | #include "dc_link_ddc.h" | |||
| 39 | #include "ddc_service_types.h" | |||
| 40 | #include "dpcd_defs.h" | |||
| 41 | ||||
| 42 | #include "i2caux_interface.h" | |||
| 43 | #if defined(CONFIG_DEBUG_FS) | |||
| 44 | #include "amdgpu_dm_debugfs.h" | |||
| 45 | #endif | |||
| 46 | ||||
| 47 | #if defined(CONFIG_DRM_AMD_DC_DCN1) | |||
| 48 | #include "dc/dcn20/dcn20_resource.h" | |||
| 49 | #endif | |||
| 50 | ||||
| 51 | static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, | |||
| 52 | struct drm_dp_aux_msg *msg) | |||
| 53 | { | |||
| 54 | ssize_t result = 0; | |||
| 55 | struct aux_payload payload; | |||
| 56 | enum aux_channel_operation_result operation_result; | |||
| 57 | ||||
| 58 | if (WARN_ON(msg->size > 16)) | |||
| 59 | return -E2BIG; | |||
| 60 | ||||
| 61 | payload.address = msg->address; | |||
| 62 | payload.data = msg->buffer; | |||
| 63 | payload.length = msg->size; | |||
| 64 | payload.reply = &msg->reply; | |||
| 65 | payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0; | |||
| 66 | payload.write = (msg->request & DP_AUX_I2C_READ) == 0; | |||
| 67 | payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0; | |||
| 68 | payload.defer_delay = 0; | |||
| 69 | ||||
| 70 | result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, | |||
| 71 | &operation_result); | |||
| 72 | ||||
| 73 | if (payload.write && result >= 0) | |||
| 74 | result = msg->size; | |||
| 75 | ||||
| 76 | if (result < 0) | |||
| 77 | switch (operation_result) { | |||
| 78 | case AUX_CHANNEL_OPERATION_SUCCEEDED: | |||
| 79 | break; | |||
| 80 | case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: | |||
| 81 | case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN: | |||
| 82 | result = -EIO; | |||
| 83 | break; | |||
| 84 | case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: | |||
| 85 | case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: | |||
| 86 | result = -EBUSY; | |||
| 87 | break; | |||
| 88 | case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: | |||
| 89 | result = -ETIMEDOUT; | |||
| 90 | break; | |||
| 91 | } | |||
| 92 | ||||
| 93 | return result; | |||
| 94 | } | |||
| 95 | ||||
| 96 | static void | |||
| 97 | dm_dp_mst_connector_destroy(struct drm_connector *connector) | |||
| 98 | { | |||
| 99 | struct amdgpu_dm_connector *aconnector = | |||
| 100 | to_amdgpu_dm_connector(connector); | |||
| 101 | ||||
| 102 | if (aconnector->dc_sink) { | |||
| 103 | dc_link_remove_remote_sink(aconnector->dc_link, | |||
| 104 | aconnector->dc_sink); | |||
| 105 | dc_sink_release(aconnector->dc_sink); | |||
| 106 | } | |||
| 107 | ||||
| 108 | kfree(aconnector->edid); | |||
| 109 | ||||
| 110 | drm_connector_cleanup(connector); | |||
| 111 | drm_dp_mst_put_port_malloc(aconnector->port); | |||
| 112 | kfree(aconnector); | |||
| 113 | } | |||
| 114 | ||||
| 115 | static int | |||
| 116 | amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) | |||
| 117 | { | |||
| 118 | struct amdgpu_dm_connector *amdgpu_dm_connector = | |||
| 119 | to_amdgpu_dm_connector(connector); | |||
| 120 | int r; | |||
| 121 | ||||
| 122 | r = drm_dp_mst_connector_late_register(connector, | |||
| 123 | amdgpu_dm_connector->port); | |||
| 124 | if (r < 0) | |||
| 125 | return r; | |||
| 126 | ||||
| 127 | #if defined(CONFIG_DEBUG_FS) | |||
| 128 | connector_debugfs_init(amdgpu_dm_connector); | |||
| 129 | #endif | |||
| 130 | ||||
| 131 | return 0; | |||
| 132 | } | |||
| 133 | ||||
| 134 | static void | |||
| 135 | amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) | |||
| 136 | { | |||
| 137 | struct amdgpu_dm_connector *amdgpu_dm_connector = | |||
| 138 | to_amdgpu_dm_connector(connector); | |||
| 139 | struct drm_dp_mst_port *port = amdgpu_dm_connector->port; | |||
| 140 | ||||
| 141 | drm_dp_mst_connector_early_unregister(connector, port); | |||
| 142 | } | |||
| 143 | ||||
| 144 | static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { | |||
| 145 | .fill_modes = drm_helper_probe_single_connector_modes, | |||
| 146 | .destroy = dm_dp_mst_connector_destroy, | |||
| 147 | .reset = amdgpu_dm_connector_funcs_reset, | |||
| 148 | .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, | |||
| 149 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | |||
| 150 | .atomic_set_property = amdgpu_dm_connector_atomic_set_property, | |||
| 151 | .atomic_get_property = amdgpu_dm_connector_atomic_get_property, | |||
| 152 | .late_register = amdgpu_dm_mst_connector_late_register, | |||
| 153 | .early_unregister = amdgpu_dm_mst_connector_early_unregister, | |||
| 154 | }; | |||
| 155 | ||||
| 156 | #if defined(CONFIG_DRM_AMD_DC_DCN1) | |||
| 157 | static bool needs_dsc_aux_workaround(struct dc_link *link) | |||
| 158 | { | |||
| 159 | if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && | |||
| 160 | (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && | |||
| 161 | link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2) | |||
| 162 | return true; | |||
| 163 | ||||
| 164 | return false; | |||
| 165 | } | |||
| 166 | ||||
| 167 | static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) | |||
| 168 | { | |||
| 169 | struct dc_sink *dc_sink = aconnector->dc_sink; | |||
| 170 | struct drm_dp_mst_port *port = aconnector->port; | |||
| 171 | u8 dsc_caps[16] = { 0 }; | |||
| 172 | ||||
| 173 | aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); | |||
| 174 | ||||
| 175 | /* | |||
| 176 | * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs | |||
| 177 | * because it only checks the dsc/fec caps of the "port variable" and not the dock | |||
| 178 | * | |||
| 179 | * This case will return NULL: DSC capable MST dock connected to a non fec/dsc capable display | |||
| 180 | * | |||
| 181 | * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux | |||
| 182 | * | |||
| 183 | */ | |||
| 184 | if (!aconnector->dsc_aux && !port->parent->port_parent && | |||
| 185 | needs_dsc_aux_workaround(aconnector->dc_link)) | |||
| 186 | aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; | |||
| 187 | ||||
| 188 | if (!aconnector->dsc_aux) | |||
| 189 | return false; | |||
| 190 | ||||
| 191 | if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0) | |||
| 192 | return false; | |||
| 193 | ||||
| 194 | if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, | |||
| 195 | dsc_caps, NULL, | |||
| 196 | &dc_sink->dsc_caps.dsc_dec_caps)) | |||
| 197 | return false; | |||
| 198 | ||||
| 199 | return true; | |||
| 200 | } | |||
| 201 | #endif | |||
| 202 | ||||
| 203 | static int dm_dp_mst_get_modes(struct drm_connector *connector) | |||
| 204 | { | |||
| 205 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | |||
| 206 | int ret = 0; | |||
| 207 | ||||
| 208 | if (!aconnector) | |||
| 209 | return drm_add_edid_modes(connector, NULL); | |||
| 210 | ||||
| 211 | if (!aconnector->edid) { | |||
| 212 | struct edid *edid; | |||
| 213 | edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); | |||
| 214 | ||||
| 215 | if (!edid) { | |||
| 216 | drm_connector_update_edid_property( | |||
| 217 | &aconnector->base, | |||
| 218 | NULL); | |||
| 219 | return ret; | |||
| 220 | } | |||
| 221 | ||||
| 222 | aconnector->edid = edid; | |||
| 223 | } | |||
| 224 | ||||
| 225 | if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) { | |||
| 226 | dc_sink_release(aconnector->dc_sink); | |||
| 227 | aconnector->dc_sink = NULL; | |||
| 228 | } | |||
| 229 | ||||
| 230 | if (!aconnector->dc_sink) { | |||
| 231 | struct dc_sink *dc_sink; | |||
| 232 | struct dc_sink_init_data init_params = { | |||
| 233 | .link = aconnector->dc_link, | |||
| 234 | .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; | |||
| 235 | dc_sink = dc_link_add_remote_sink( | |||
| 236 | aconnector->dc_link, | |||
| 237 | (uint8_t *)aconnector->edid, | |||
| 238 | (aconnector->edid->extensions + 1) * EDID_LENGTH, | |||
| 239 | &init_params); | |||
| 240 | ||||
| 241 | dc_sink->priv = aconnector; | |||
| 242 | /* dc_link_add_remote_sink returns a new reference */ | |||
| 243 | aconnector->dc_sink = dc_sink; | |||
| 244 | ||||
| 245 | if (aconnector->dc_sink) { | |||
| 246 | amdgpu_dm_update_freesync_caps( | |||
| 247 | connector, aconnector->edid); | |||
| 248 | ||||
| 249 | #if defined(CONFIG_DRM_AMD_DC_DCN1) | |||
| 250 | if (!validate_dsc_caps_on_connector(aconnector)) | |||
| 251 | memset(&aconnector->dc_sink->dsc_caps, | |||
| 252 | 0, sizeof(aconnector->dc_sink->dsc_caps)); | |||
| 253 | #endif | |||
| 254 | } | |||
| 255 | } | |||
| 256 | ||||
| 257 | drm_connector_update_edid_property( | |||
| 258 | &aconnector->base, aconnector->edid); | |||
| 259 | ||||
| 260 | ret = drm_add_edid_modes(connector, aconnector->edid); | |||
| 261 | ||||
| 262 | return ret; | |||
| 263 | } | |||
| 264 | ||||
| 265 | static struct drm_encoder * | |||
| 266 | dm_mst_atomic_best_encoder(struct drm_connector *connector, | |||
| 267 | struct drm_connector_state *connector_state) | |||
| 268 | { | |||
| 269 | struct drm_device *dev = connector->dev; | |||
| 270 | struct amdgpu_device *adev = drm_to_adev(dev); | |||
| 271 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); | |||
| 272 | ||||
| 273 | return &adev->dm.mst_encoders[acrtc->crtc_id].base; | |||
| 274 | } | |||
| 275 | ||||
| 276 | static int | |||
| 277 | dm_dp_mst_detect(struct drm_connector *connector, | |||
| 278 | struct drm_modeset_acquire_ctx *ctx, bool force) | |||
| 279 | { | |||
| 280 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | |||
| 281 | struct amdgpu_dm_connector *master = aconnector->mst_port; | |||
| 282 | ||||
| 283 | if (drm_connector_is_unregistered(connector)) | |||
| 284 | return connector_status_disconnected; | |||
| 285 | ||||
| 286 | return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, | |||
| 287 | aconnector->port); | |||
| 288 | } | |||
| 289 | ||||
| 290 | static int dm_dp_mst_atomic_check(struct drm_connector *connector, | |||
| 291 | struct drm_atomic_state *state) | |||
| 292 | { | |||
| 293 | struct drm_connector_state *new_conn_state = | |||
| 294 | drm_atomic_get_new_connector_state(state, connector); | |||
| 295 | struct drm_connector_state *old_conn_state = | |||
| 296 | drm_atomic_get_old_connector_state(state, connector); | |||
| 297 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | |||
| 298 | struct drm_crtc_state *new_crtc_state; | |||
| 299 | struct drm_dp_mst_topology_mgr *mst_mgr; | |||
| 300 | struct drm_dp_mst_port *mst_port; | |||
| 301 | ||||
| 302 | mst_port = aconnector->port; | |||
| 303 | mst_mgr = &aconnector->mst_port->mst_mgr; | |||
| 304 | ||||
| 305 | if (!old_conn_state->crtc) | |||
| 306 | return 0; | |||
| 307 | ||||
| 308 | if (new_conn_state->crtc) { | |||
| 309 | new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); | |||
| 310 | if (!new_crtc_state || | |||
| 311 | !drm_atomic_crtc_needs_modeset(new_crtc_state) || | |||
| 312 | new_crtc_state->enable) | |||
| 313 | return 0; | |||
| 314 | } | |||
| 315 | ||||
| 316 | return drm_dp_atomic_release_vcpi_slots(state, | |||
| 317 | mst_mgr, | |||
| 318 | mst_port); | |||
| 319 | } | |||
| 320 | ||||
| 321 | static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { | |||
| 322 | .get_modes = dm_dp_mst_get_modes, | |||
| 323 | .mode_valid = amdgpu_dm_connector_mode_valid, | |||
| 324 | .atomic_best_encoder = dm_mst_atomic_best_encoder, | |||
| 325 | .detect_ctx = dm_dp_mst_detect, | |||
| 326 | .atomic_check = dm_dp_mst_atomic_check, | |||
| 327 | }; | |||
| 328 | ||||
| 329 | static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) | |||
| 330 | { | |||
| 331 | drm_encoder_cleanup(encoder); | |||
| 332 | kfree(encoder); | |||
| 333 | } | |||
| 334 | ||||
| 335 | static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { | |||
| 336 | .destroy = amdgpu_dm_encoder_destroy, | |||
| 337 | }; | |||
| 338 | ||||
| 339 | void | |||
| 340 | dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev) | |||
| 341 | { | |||
| 342 | struct drm_device *dev = adev_to_drm(adev); | |||
| 343 | int i; | |||
| 344 | ||||
| 345 | for (i = 0; i < adev->dm.display_indexes_num; i++) { | |||
| 346 | struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i]; | |||
| 347 | struct drm_encoder *encoder = &amdgpu_encoder->base; | |||
| 348 | ||||
| 349 | encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); | |||
| 350 | ||||
| 351 | drm_encoder_init( | |||
| 352 | dev, | |||
| 353 | &amdgpu_encoder->base, | |||
| 354 | &amdgpu_dm_encoder_funcs, | |||
| 355 | DRM_MODE_ENCODER_DPMST, | |||
| 356 | NULL); | |||
| 357 | ||||
| 358 | drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); | |||
| 359 | } | |||
| 360 | } | |||
| 361 | ||||
| 362 | static struct drm_connector * | |||
| 363 | dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
| 364 | struct drm_dp_mst_port *port, | |||
| 365 | const char *pathprop) | |||
| 366 | { | |||
| 367 | struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); | |||
| 368 | struct drm_device *dev = master->base.dev; | |||
| 369 | struct amdgpu_device *adev = drm_to_adev(dev); | |||
| 370 | struct amdgpu_dm_connector *aconnector; | |||
| 371 | struct drm_connector *connector; | |||
| 372 | int i; | |||
| 373 | ||||
| 374 | aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); | |||
| 375 | if (!aconnector) | |||
| 376 | return NULL; | |||
| 377 | ||||
| 378 | connector = &aconnector->base; | |||
| 379 | aconnector->port = port; | |||
| 380 | aconnector->mst_port = master; | |||
| 381 | ||||
| 382 | if (drm_connector_init( | |||
| 383 | dev, | |||
| 384 | connector, | |||
| 385 | &dm_dp_mst_connector_funcs, | |||
| 386 | DRM_MODE_CONNECTOR_DisplayPort)) { | |||
| 387 | kfree(aconnector); | |||
| 388 | return NULL; | |||
| 389 | } | |||
| 390 | drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs); | |||
| 391 | ||||
| 392 | amdgpu_dm_connector_init_helper( | |||
| 393 | &adev->dm, | |||
| 394 | aconnector, | |||
| 395 | DRM_MODE_CONNECTOR_DisplayPort, | |||
| 396 | master->dc_link, | |||
| 397 | master->connector_id); | |||
| 398 | ||||
| 399 | for (i = 0; i < adev->dm.display_indexes_num; i++) { | |||
| 400 | drm_connector_attach_encoder(&aconnector->base, | |||
| 401 | &adev->dm.mst_encoders[i].base); | |||
| 402 | } | |||
| 403 | ||||
| 404 | connector->max_bpc_property = master->base.max_bpc_property; | |||
| 405 | if (connector->max_bpc_property) | |||
| 406 | drm_connector_attach_max_bpc_property(connector, 8, 16); | |||
| 407 | ||||
| 408 | connector->vrr_capable_property = master->base.vrr_capable_property; | |||
| 409 | if (connector->vrr_capable_property) | |||
| 410 | drm_connector_attach_vrr_capable_property(connector); | |||
| 411 | ||||
| 412 | drm_object_attach_property( | |||
| 413 | &connector->base, | |||
| 414 | dev->mode_config.path_property, | |||
| 415 | 0); | |||
| 416 | drm_object_attach_property( | |||
| 417 | &connector->base, | |||
| 418 | dev->mode_config.tile_property, | |||
| 419 | 0); | |||
| 420 | ||||
| 421 | drm_connector_set_path_property(connector, pathprop); | |||
| 422 | ||||
| 423 | /* | |||
| 424 | * Initialize connector state before adding the connector to drm and | |||
| 425 | * framebuffer lists | |||
| 426 | */ | |||
| 427 | amdgpu_dm_connector_funcs_reset(connector); | |||
| 428 | ||||
| 429 | drm_dp_mst_get_port_malloc(port); | |||
| 430 | ||||
| 431 | return connector; | |||
| 432 | } | |||
| 433 | ||||
| 434 | static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { | |||
| 435 | .add_connector = dm_dp_add_mst_connector, | |||
| 436 | }; | |||
| 437 | ||||
| 438 | void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, | |||
| 439 | struct amdgpu_dm_connector *aconnector, | |||
| 440 | int link_index) | |||
| 441 | { | |||
| 442 | aconnector->dm_dp_aux.aux.name = | |||
| 443 | kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", | |||
| 444 | link_index); | |||
| 445 | aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; | |||
| 446 | aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc; | |||
| 447 | ||||
| 448 | drm_dp_aux_init(&aconnector->dm_dp_aux.aux); | |||
| 449 | drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, | |||
| 450 | &aconnector->base); | |||
| 451 | ||||
| 452 | if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) | |||
| 453 | return; | |||
| 454 | ||||
| 455 | aconnector->mst_mgr.cbs = &dm_mst_cbs; | |||
| 456 | drm_dp_mst_topology_mgr_init( | |||
| 457 | &aconnector->mst_mgr, | |||
| 458 | adev_to_drm(dm->adev), | |||
| 459 | &aconnector->dm_dp_aux.aux, | |||
| 460 | 16, | |||
| 461 | 4, | |||
| 462 | aconnector->connector_id); | |||
| 463 | ||||
| 464 | drm_connector_attach_dp_subconnector_property(&aconnector->base); | |||
| 465 | } | |||
| 466 | ||||
| 467 | int dm_mst_get_pbn_divider(struct dc_link *link) | |||
| 468 | { | |||
| 469 | if (!link) | |||
| 470 | return 0; | |||
| 471 | ||||
| 472 | return dc_link_bandwidth_kbps(link, | |||
| 473 | dc_link_get_link_cap(link)) / (8 * 1000 * 54); | |||
| 474 | } | |||
| 475 | ||||
| 476 | #if defined(CONFIG_DRM_AMD_DC_DCN1) | |||
| 477 | ||||
| 478 | struct dsc_mst_fairness_params { | |||
| 479 | struct dc_crtc_timing *timing; | |||
| 480 | struct dc_sink *sink; | |||
| 481 | struct dc_dsc_bw_range bw_range; | |||
| 482 | bool compression_possible; | |||
| 483 | struct drm_dp_mst_port *port; | |||
| 484 | enum dsc_clock_force_state clock_force_enable; | |||
| 485 | uint32_t num_slices_h; | |||
| 486 | uint32_t num_slices_v; | |||
| 487 | uint32_t bpp_overwrite; | |||
| 488 | }; | |||
| 489 | ||||
| 490 | struct dsc_mst_fairness_vars { | |||
| 491 | int pbn; | |||
| 492 | bool dsc_enabled; | |||
| 493 | int bpp_x16; | |||
| 494 | }; | |||
| 495 | ||||
| 496 | static int kbps_to_peak_pbn(int kbps) | |||
| 497 | { | |||
| 498 | u64 peak_kbps = kbps; | |||
| 499 | ||||
| 500 | peak_kbps *= 1006; | |||
| 501 | peak_kbps = div_u64(peak_kbps, 1000); | |||
| 502 | return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); | |||
| 503 | } | |||
| 504 | ||||
| 505 | static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, | |||
| 506 | struct dsc_mst_fairness_vars *vars, | |||
| 507 | int count) | |||
| 508 | { | |||
| 509 | int i; | |||
| 510 | ||||
| 511 | for (i = 0; i < count; i++) { | |||
| 512 | memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg))__builtin_memset((¶ms[i].timing->dsc_cfg), (0), (sizeof (params[i].timing->dsc_cfg))); | |||
| 513 | if (vars[i].dsc_enabled && dc_dsc_compute_config( | |||
| 514 | params[i].sink->ctx->dc->res_pool->dscs[0], | |||
| 515 | ¶ms[i].sink->dsc_caps.dsc_dec_caps, | |||
| 516 | params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, | |||
| 517 | 0, | |||
| 518 | params[i].timing, | |||
| 519 | ¶ms[i].timing->dsc_cfg)) { | |||
| 520 | params[i].timing->flags.DSC = 1; | |||
| 521 | ||||
| 522 | if (params[i].bpp_overwrite) | |||
| 523 | params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; | |||
| 524 | else | |||
| 525 | params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16; | |||
| 526 | ||||
| 527 | if (params[i].num_slices_h) | |||
| 528 | params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h; | |||
| 529 | ||||
| 530 | if (params[i].num_slices_v) | |||
| 531 | params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v; | |||
| 532 | } else { | |||
| 533 | params[i].timing->flags.DSC = 0; | |||
| 534 | } | |||
| 535 | } | |||
| 536 | } | |||
| 537 | ||||
| 538 | static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) | |||
| 539 | { | |||
| 540 | struct dc_dsc_config dsc_config; | |||
| 541 | u64 kbps; | |||
| 542 | ||||
| 543 | kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); | |||
| 544 | dc_dsc_compute_config( | |||
| 545 | param.sink->ctx->dc->res_pool->dscs[0], | |||
| 546 | ¶m.sink->dsc_caps.dsc_dec_caps, | |||
| 547 | param.sink->ctx->dc->debug.dsc_min_slice_height_override, | |||
| 548 | (int) kbps, param.timing, &dsc_config); | |||
| 549 | ||||
| 550 | return dsc_config.bits_per_pixel; | |||
| 551 | } | |||
| 552 | ||||
| 553 | static void increase_dsc_bpp(struct drm_atomic_state *state, | |||
| 554 | struct dc_link *dc_link, | |||
| 555 | struct dsc_mst_fairness_params *params, | |||
| 556 | struct dsc_mst_fairness_vars *vars, | |||
| 557 | int count) | |||
| 558 | { | |||
| 559 | int i; | |||
| 560 | bool bpp_increased[MAX_PIPES]; | |||
| 561 | int initial_slack[MAX_PIPES]; | |||
| 562 | int min_initial_slack; | |||
| 563 | int next_index; | |||
| 564 | int remaining_to_increase = 0; | |||
| 565 | int pbn_per_timeslot; | |||
| 566 | int link_timeslots_used; | |||
| 567 | int fair_pbn_alloc; | |||
| 568 | ||||
| 569 | pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link); | |||
| 570 | ||||
| 571 | for (i = 0; i < count; i++) { | |||
| 572 | if (vars[i].dsc_enabled) { | |||
| 573 | initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn; | |||
| 574 | bpp_increased[i] = false; | |||
| 575 | remaining_to_increase += 1; | |||
| 576 | } else { | |||
| 577 | initial_slack[i] = 0; | |||
| 578 | bpp_increased[i] = true; | |||
| 579 | } | |||
| 580 | } | |||
| 581 | ||||
| 582 | while (remaining_to_increase) { | |||
| 583 | next_index = -1; | |||
| 584 | min_initial_slack = -1; | |||
| 585 | for (i = 0; i < count; i++) { | |||
| 586 | if (!bpp_increased[i]) { | |||
| 587 | if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) { | |||
| 588 | min_initial_slack = initial_slack[i]; | |||
| 589 | next_index = i; | |||
| 590 | } | |||
| 591 | } | |||
| 592 | } | |||
| 593 | ||||
| 594 | if (next_index == -1) | |||
| 595 | break; | |||
| 596 | ||||
| 597 | link_timeslots_used = 0; | |||
| 598 | ||||
| 599 | for (i = 0; i < count; i++) | |||
| 600 | link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot); | |||
| 601 | ||||
| 602 | fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot; | |||
| 603 | ||||
| 604 | if (initial_slack[next_index] > fair_pbn_alloc) { | |||
| 605 | vars[next_index].pbn += fair_pbn_alloc; | |||
| 606 | if (drm_dp_atomic_find_vcpi_slots(state, | |||
| 607 | params[next_index].port->mgr, | |||
| 608 | params[next_index].port, | |||
| 609 | vars[next_index].pbn, | |||
| 610 | pbn_per_timeslot) < 0) | |||
| 611 | return; | |||
| 612 | if (!drm_dp_mst_atomic_check(state)) { | |||
| 613 | vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); | |||
| 614 | } else { | |||
| 615 | vars[next_index].pbn -= fair_pbn_alloc; | |||
| 616 | if (drm_dp_atomic_find_vcpi_slots(state, | |||
| 617 | params[next_index].port->mgr, | |||
| 618 | params[next_index].port, | |||
| 619 | vars[next_index].pbn, | |||
| 620 | pbn_per_timeslot) < 0) | |||
| 621 | return; | |||
| 622 | } | |||
| 623 | } else { | |||
| 624 | vars[next_index].pbn += initial_slack[next_index]; | |||
| 625 | if (drm_dp_atomic_find_vcpi_slots(state, | |||
| 626 | params[next_index].port->mgr, | |||
| 627 | params[next_index].port, | |||
| 628 | vars[next_index].pbn, | |||
| 629 | pbn_per_timeslot) < 0) | |||
| 630 | return; | |||
| 631 | if (!drm_dp_mst_atomic_check(state)) { | |||
| 632 | vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; | |||
| 633 | } else { | |||
| 634 | vars[next_index].pbn -= initial_slack[next_index]; | |||
| 635 | if (drm_dp_atomic_find_vcpi_slots(state, | |||
| 636 | params[next_index].port->mgr, | |||
| 637 | params[next_index].port, | |||
| 638 | vars[next_index].pbn, | |||
| 639 | pbn_per_timeslot) < 0) | |||
| 640 | return; | |||
| 641 | } | |||
| 642 | } | |||
| 643 | ||||
| 644 | bpp_increased[next_index] = true; | |||
| 645 | remaining_to_increase--; | |||
| 646 | } | |||
| 647 | } | |||
| 648 | ||||
| 649 | static void try_disable_dsc(struct drm_atomic_state *state, | |||
| 650 | struct dc_link *dc_link, | |||
| 651 | struct dsc_mst_fairness_params *params, | |||
| 652 | struct dsc_mst_fairness_vars *vars, | |||
| 653 | int count) | |||
| 654 | { | |||
| 655 | int i; | |||
| 656 | bool tried[MAX_PIPES]; | |||
| 657 | int kbps_increase[MAX_PIPES]; | |||
| 658 | int max_kbps_increase; | |||
| 659 | int next_index; | |||
| 660 | int remaining_to_try = 0; | |||
| 661 | ||||
| 662 | for (i = 0; i < count; i++) { | |||
| 663 | if (vars[i].dsc_enabled | |||
| 664 | && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16 | |||
| 665 | && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { | |||
| 666 | kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; | |||
| 667 | tried[i] = false; | |||
| 668 | remaining_to_try += 1; | |||
| 669 | } else { | |||
| 670 | kbps_increase[i] = 0; | |||
| 671 | tried[i] = true; | |||
| 672 | } | |||
| 673 | } | |||
| 674 | ||||
| 675 | while (remaining_to_try) { | |||
| 676 | next_index = -1; | |||
| 677 | max_kbps_increase = -1; | |||
| 678 | for (i = 0; i < count; i++) { | |||
| 679 | if (!tried[i]) { | |||
| 680 | if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) { | |||
| 681 | max_kbps_increase = kbps_increase[i]; | |||
| 682 | next_index = i; | |||
| 683 | } | |||
| 684 | } | |||
| 685 | } | |||
| 686 | ||||
| 687 | if (next_index == -1) | |||
| 688 | break; | |||
| 689 | ||||
| 690 | vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); | |||
| 691 | if (drm_dp_atomic_find_vcpi_slots(state, | |||
| 692 | params[next_index].port->mgr, | |||
| 693 | params[next_index].port, | |||
| 694 | vars[next_index].pbn, | |||
| 695 | dm_mst_get_pbn_divider(dc_link)) < 0) | |||
| 696 | return; | |||
| 697 | ||||
| 698 | if (!drm_dp_mst_atomic_check(state)) { | |||
| 699 | vars[next_index].dsc_enabled = false; | |||
| 700 | vars[next_index].bpp_x16 = 0; | |||
| 701 | } else { | |||
| 702 | vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps); | |||
| 703 | if (drm_dp_atomic_find_vcpi_slots(state, | |||
| 704 | params[next_index].port->mgr, | |||
| 705 | params[next_index].port, | |||
| 706 | vars[next_index].pbn, | |||
| 707 | dm_mst_get_pbn_divider(dc_link)) < 0) | |||
| 708 | return; | |||
| 709 | } | |||
| 710 | ||||
| 711 | tried[next_index] = true; | |||
| 712 | remaining_to_try--; | |||
| 713 | } | |||
| 714 | } | |||
| 715 | ||||
| 716 | static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, | |||
| 717 | struct dc_state *dc_state, | |||
| 718 | struct dc_link *dc_link) | |||
| 719 | { | |||
| 720 | int i; | |||
| 721 | struct dc_stream_state *stream; | |||
| 722 | struct dsc_mst_fairness_params params[MAX_PIPES]; | |||
| 723 | struct dsc_mst_fairness_vars vars[MAX_PIPES]; | |||
| 724 | struct amdgpu_dm_connector *aconnector; | |||
| 725 | int count = 0; | |||
| 726 | bool debugfs_overwrite = false; | |||
| 727 | ||||
| 728 | memset(params, 0, sizeof(params)); | |||
| 729 | ||||
| 730 | /* Set up params */ | |||
| 731 | for (i = 0; i < dc_state->stream_count; i++) { | |||
| 732 | struct dc_dsc_policy dsc_policy = {0}; | |||
| 733 | ||||
| 734 | stream = dc_state->streams[i]; | |||
| 735 | ||||
| 736 | if (stream->link != dc_link) | |||
| 737 | continue; | |||
| 738 | ||||
| 739 | stream->timing.flags.DSC = 0; | |||
| 740 | ||||
| 741 | params[count].timing = &stream->timing; | |||
| 742 | params[count].sink = stream->sink; | |||
| 743 | aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; | |||
| 744 | params[count].port = aconnector->port; | |||
| 745 | params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; | |||
| 746 | if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) | |||
| 747 | debugfs_overwrite = true; | |||
| 748 | params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; | |||
| 749 | params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; | |||
| 750 | params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; | |||
| 751 | params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; | |||
| 752 | dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy); | |||
| 753 | if (!dc_dsc_compute_bandwidth_range( | |||
| 754 | stream->sink->ctx->dc->res_pool->dscs[0], | |||
| 755 | stream->sink->ctx->dc->debug.dsc_min_slice_height_override, | |||
| 756 | dsc_policy.min_target_bpp, | |||
| 757 | dsc_policy.max_target_bpp, | |||
| 758 | &stream->sink->dsc_caps.dsc_dec_caps, | |||
| 759 | &stream->timing, ¶ms[count].bw_range)) | |||
| 760 | params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); | |||
| 761 | ||||
| 762 | count++; | |||
| 763 | } | |||
| 764 | /* Try no compression */ | |||
| 765 | for (i = 0; i < count; i++) { | |||
| 766 | vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); | |||
| 767 | vars[i].dsc_enabled = false; | |||
| 768 | vars[i].bpp_x16 = 0; | |||
| 769 | if (drm_dp_atomic_find_vcpi_slots(state, | |||
| 770 | params[i].port->mgr, | |||
| 771 | params[i].port, | |||
| 772 | vars[i].pbn, | |||
| 773 | dm_mst_get_pbn_divider(dc_link)) < 0) | |||
| 774 | return false; | |||
| 775 | } | |||
| 776 | if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { | |||
| 777 | set_dsc_configs_from_fairness_vars(params, vars, count); | |||
| 778 | return true; | |||
| 779 | } | |||
| 780 | ||||
| 781 | /* Try max compression */ | |||
| 782 | for (i = 0; i < count; i++) { | |||
| 783 | if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { | |||
| 784 | vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); | |||
| 785 | vars[i].dsc_enabled = true; | |||
| 786 | vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16; | |||
| 787 | if (drm_dp_atomic_find_vcpi_slots(state, | |||
| 788 | params[i].port->mgr, | |||
| 789 | params[i].port, | |||
| 790 | vars[i].pbn, | |||
| 791 | dm_mst_get_pbn_divider(dc_link)) < 0) | |||
| 792 | return false; | |||
| 793 | } else { | |||
| 794 | vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); | |||
| 795 | vars[i].dsc_enabled = false; | |||
| 796 | vars[i].bpp_x16 = 0; | |||
| 797 | if (drm_dp_atomic_find_vcpi_slots(state, | |||
| 798 | params[i].port->mgr, | |||
| 799 | params[i].port, | |||
| 800 | vars[i].pbn, | |||
| 801 | dm_mst_get_pbn_divider(dc_link)) < 0) | |||
| 802 | return false; | |||
| 803 | } | |||
| 804 | } | |||
| 805 | if (drm_dp_mst_atomic_check(state)) | |||
| 806 | return false; | |||
| 807 | ||||
| 808 | /* Optimize degree of compression */ | |||
| 809 | increase_dsc_bpp(state, dc_link, params, vars, count); | |||
| 810 | ||||
| 811 | try_disable_dsc(state, dc_link, params, vars, count); | |||
| 812 | ||||
| 813 | set_dsc_configs_from_fairness_vars(params, vars, count); | |||
| 814 | ||||
| 815 | return true; | |||
| 816 | } | |||
| 817 | ||||
| 818 | bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, | |||
| 819 | struct dc_state *dc_state) | |||
| 820 | { | |||
| 821 | int i, j; | |||
| 822 | struct dc_stream_state *stream; | |||
| 823 | bool computed_streams[MAX_PIPES]; | |||
| 824 | struct amdgpu_dm_connector *aconnector; | |||
| 825 | ||||
| 826 | for (i = 0; i < dc_state->stream_count; i++) | |||
| 827 | computed_streams[i] = false; | |||
| 828 | ||||
| 829 | for (i = 0; i < dc_state->stream_count; i++) { | |||
| 830 | stream = dc_state->streams[i]; | |||
| 831 | ||||
| 832 | if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) | |||
| 833 | continue; | |||
| 834 | ||||
| 835 | aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; | |||
| 836 | ||||
| 837 | if (!aconnector || !aconnector->dc_sink) | |||
| 838 | continue; | |||
| 839 | ||||
| 840 | if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) | |||
| 841 | continue; | |||
| 842 | ||||
| 843 | if (computed_streams[i]) | |||
| | ← Branch condition evaluates to a garbage value |
| 844 | continue; | |||
| 845 | ||||
| 846 | if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) | |||
| 847 | return false; | |||
| 848 | ||||
| 849 | mutex_lock(&aconnector->mst_mgr.lock); | |||
| 850 | if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) { | |||
| 851 | mutex_unlock(&aconnector->mst_mgr.lock); | |||
| 852 | return false; | |||
| 853 | } | |||
| 854 | mutex_unlock(&aconnector->mst_mgr.lock); | |||
| 855 | ||||
| 856 | for (j = 0; j < dc_state->stream_count; j++) { | |||
| 857 | if (dc_state->streams[j]->link == stream->link) | |||
| 858 | computed_streams[j] = true; | |||
| 859 | } | |||
| 860 | } | |||
| 861 | ||||
| 862 | for (i = 0; i < dc_state->stream_count; i++) { | |||
| 863 | stream = dc_state->streams[i]; | |||
| 864 | ||||
| 865 | if (stream->timing.flags.DSC == 1) | |||
| 866 | if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) | |||
| 867 | return false; | |||
| 868 | } | |||
| 869 | ||||
| 870 | return true; | |||
| 871 | } | |||
| 872 | ||||
| 873 | #endif |