| File: | dev/pci/drm/i915/gt/uc/intel_uc.c |
| Warning: | line 436, column 27: Value stored to 'i915' during its initialization is never read |
| 1 | // SPDX-License-Identifier: MIT |
| 2 | /* |
| 3 | * Copyright © 2016-2019 Intel Corporation |
| 4 | */ |
| 5 | |
| 6 | #include <linux/string_helpers.h> |
| 7 | |
| 8 | #include "gt/intel_gt.h" |
| 9 | #include "gt/intel_reset.h" |
| 10 | #include "intel_guc.h" |
| 11 | #include "intel_guc_ads.h" |
| 12 | #include "intel_guc_submission.h" |
| 13 | #include "gt/intel_rps.h" |
| 14 | #include "intel_uc.h" |
| 15 | |
| 16 | #include "i915_drv.h" |
| 17 | |
| 18 | static const struct intel_uc_ops uc_ops_off; |
| 19 | static const struct intel_uc_ops uc_ops_on; |
| 20 | |
| 21 | static void uc_expand_default_options(struct intel_uc *uc) |
| 22 | { |
| 23 | struct drm_i915_private *i915 = uc_to_gt(uc)->i915; |
| 24 | |
| 25 | if (i915->params.enable_guc != -1) |
| 26 | return; |
| 27 | |
| 28 | /* Don't enable GuC/HuC on pre-Gen12 */ |
| 29 | if (GRAPHICS_VER(i915) < 12) { |
| 30 | i915->params.enable_guc = 0; |
| 31 | return; |
| 32 | } |
| 33 | |
| 34 | /* Don't enable GuC/HuC on older Gen12 platforms */ |
| 35 | if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) { |
| 36 | i915->params.enable_guc = 0; |
| 37 | return; |
| 38 | } |
| 39 | |
| 40 | /* Intermediate platforms are HuC authentication only */ |
| 41 | if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) { |
| 42 | i915->params.enable_guc = ENABLE_GUC_LOAD_HUC; |
| 43 | return; |
| 44 | } |
| 45 | |
| 46 | /* Default: enable HuC authentication and GuC submission */ |
| 47 | i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION; |
| 48 | |
| 49 | /* XEHPSDV and PVC do not use HuC */ |
| 50 | if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915)) |
| 51 | i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC; |
| 52 | } |
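
Editor's note: the macro expansions in the raw report give concrete bit values for the enable_guc options, which are worth keeping in one place since the cleaned listing above no longer shows them. Read off those expansions (not independently verified against the driver headers), the defaults computed by uc_expand_default_options() come out to 3 (HuC load + GuC submission), 2 on Alderlake-S (HuC authentication only), and 1 on XEHPSDV/PVC (submission without HuC):

/* enable_guc bit values as expanded in the raw report (editor's note): */
#define ENABLE_GUC_SUBMISSION	(1UL << 0)	/* use GuC command submission */
#define ENABLE_GUC_LOAD_HUC	(1UL << 1)	/* load and authenticate HuC */
#define ENABLE_GUC_MASK		GENMASK(1, 0)	/* all documented flags: 0x3 */
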
| 53 | |
| 54 | /* Reset GuC, providing us with fresh state for both GuC and HuC. */ |
| 55 | |
| 56 | static int __intel_uc_reset_hw(struct intel_uc *uc) |
| 57 | { |
| 58 | struct intel_gt *gt = uc_to_gt(uc); |
| 59 | int ret; |
| 60 | u32 guc_status; |
| 61 | |
| 62 | ret = i915_inject_probe_error(gt->i915, -ENXIO); |
| 63 | if (ret) |
| 64 | return ret; |
| 65 | |
| 66 | ret = intel_reset_guc(gt); |
| 67 | if (ret) { |
| 68 | DRM_ERROR("Failed to reset GuC, ret = %d\n", ret); |
| 69 | return ret; |
| 70 | } |
| 71 | |
| 72 | guc_status = intel_uncore_read(gt->uncore, GUC_STATUS); |
| 73 | WARN(!(guc_status & GS_MIA_IN_RESET), |
| 74 | "GuC status: 0x%x, MIA core expected to be in reset\n", |
| 75 | guc_status); |
| 76 | |
| 77 | return ret; |
| 78 | } |
| 79 | |
| 80 | static void __confirm_options(struct intel_uc *uc) |
| 81 | { |
| 82 | struct drm_i915_private *i915 = uc_to_gt(uc)->i915; |
| 83 | |
| 84 | drm_dbg(&i915->drm, |
| 85 | "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n", |
| 86 | i915->params.enable_guc, |
| 87 | str_yes_no(intel_uc_wants_guc(uc)), |
| 88 | str_yes_no(intel_uc_wants_guc_submission(uc)), |
| 89 | str_yes_no(intel_uc_wants_huc(uc)), |
| 90 | str_yes_no(intel_uc_wants_guc_slpc(uc))); |
| 91 | |
| 92 | if (i915->params.enable_guc == 0) { |
| 93 | GEM_BUG_ON(intel_uc_wants_guc(uc)); |
| 94 | GEM_BUG_ON(intel_uc_wants_guc_submission(uc)); |
| 95 | GEM_BUG_ON(intel_uc_wants_huc(uc)); |
| 96 | GEM_BUG_ON(intel_uc_wants_guc_slpc(uc)); |
| 97 | return; |
| 98 | } |
| 99 | |
| 100 | if (!intel_uc_supports_guc(uc)) |
| 101 | drm_info(&i915->drm, |
| 102 | "Incompatible option enable_guc=%d - %s\n", |
| 103 | i915->params.enable_guc, "GuC is not supported!"); |
| 104 | |
| 105 | if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC && |
| 106 | !intel_uc_supports_huc(uc)) |
| 107 | drm_info(&i915->drm, |
| 108 | "Incompatible option enable_guc=%d - %s\n", |
| 109 | i915->params.enable_guc, "HuC is not supported!"); |
| 110 | |
| 111 | if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION && |
| 112 | !intel_uc_supports_guc_submission(uc)) |
| 113 | drm_info(&i915->drm, |
| 114 | "Incompatible option enable_guc=%d - %s\n", |
| 115 | i915->params.enable_guc, "GuC submission is N/A"); |
| 116 | |
| 117 | if (i915->params.enable_guc & ~ENABLE_GUC_MASK) |
| 118 | drm_info(&i915->drm, |
| 119 | "Incompatible option enable_guc=%d - %s\n", |
| 120 | i915->params.enable_guc, "undocumented flag"); |
| 121 | } |
| 122 | |
| 123 | void intel_uc_init_early(struct intel_uc *uc) |
| 124 | { |
| 125 | uc_expand_default_options(uc); |
| 126 | |
| 127 | intel_guc_init_early(&uc->guc); |
| 128 | intel_huc_init_early(&uc->huc); |
| 129 | |
| 130 | __confirm_options(uc); |
| 131 | |
| 132 | if (intel_uc_wants_guc(uc)) |
| 133 | uc->ops = &uc_ops_on; |
| 134 | else |
| 135 | uc->ops = &uc_ops_off; |
| 136 | } |
| 137 | |
| 138 | void intel_uc_init_late(struct intel_uc *uc) |
| 139 | { |
| 140 | intel_guc_init_late(&uc->guc); |
| 141 | } |
| 142 | |
| 143 | void intel_uc_driver_late_release(struct intel_uc *uc) |
| 144 | { |
| 145 | } |
| 146 | |
| 147 | /** |
| 148 | * intel_uc_init_mmio - setup uC MMIO access |
| 149 | * @uc: the intel_uc structure |
| 150 | * |
| 151 | * Set up the minimal state necessary for MMIO accesses later in the |
| 152 | * initialization sequence. |
| 153 | */ |
| 154 | void intel_uc_init_mmio(struct intel_uc *uc) |
| 155 | { |
| 156 | intel_guc_init_send_regs(&uc->guc); |
| 157 | } |
| 158 | |
| 159 | static void __uc_capture_load_err_log(struct intel_uc *uc) |
| 160 | { |
| 161 | struct intel_guc *guc = &uc->guc; |
| 162 | |
| 163 | if (guc->log.vma && !uc->load_err_log) |
| 164 | uc->load_err_log = i915_gem_object_get(guc->log.vma->obj); |
| 165 | } |
| 166 | |
| 167 | static void __uc_free_load_err_log(struct intel_uc *uc) |
| 168 | { |
| 169 | struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log); |
| 170 | |
| 171 | if (log) |
| 172 | i915_gem_object_put(log); |
| 173 | } |
| 174 | |
| 175 | void intel_uc_driver_remove(struct intel_uc *uc) |
| 176 | { |
| 177 | intel_uc_fini_hw(uc); |
| 178 | intel_uc_fini(uc); |
| 179 | __uc_free_load_err_log(uc); |
| 180 | } |
| 181 | |
| 182 | /* |
| 183 | * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 |
| 184 | * register using the same bits used in the CT message payload. Since our |
| 185 | * communication channel with GuC is turned off at this point, we can save the |
| 186 | * message and handle it after we turn it back on. |
| 187 | */ |
| 188 | static void guc_clear_mmio_msg(struct intel_guc *guc) |
| 189 | { |
| 190 | intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0); |
| 191 | } |
| 192 | |
| 193 | static void guc_get_mmio_msg(struct intel_guc *guc) |
| 194 | { |
| 195 | u32 val; |
| 196 | |
| 197 | spin_lock_irq(&guc->irq_lock); |
| 198 | |
| 199 | val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15)); |
| 200 | guc->mmio_msg |= val & guc->msg_enabled_mask; |
| 201 | |
| 202 | /* |
| 203 | * clear all events, including the ones we're not currently servicing, |
| 204 | * to make sure we don't try to process a stale message if we enable |
| 205 | * handling of more events later. |
| 206 | */ |
| 207 | guc_clear_mmio_msg(guc); |
| 208 | |
| 209 | spin_unlock_irq(&guc->irq_lock); |
| 210 | } |
| 211 | |
| 212 | static void guc_handle_mmio_msg(struct intel_guc *guc) |
| 213 | { |
| 214 | /* we need communication to be enabled to reply to GuC */ |
| 215 | GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct)); |
| 216 | |
| 217 | spin_lock_irq(&guc->irq_lock); |
| 218 | if (guc->mmio_msg) { |
| 219 | intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1); |
| 220 | guc->mmio_msg = 0; |
| 221 | } |
| 222 | spin_unlock_irq(&guc->irq_lock); |
| 223 | } |
| 224 | |
| 225 | static int guc_enable_communication(struct intel_guc *guc) |
| 226 | { |
| 227 | struct intel_gt *gt = guc_to_gt(guc); |
| 228 | struct drm_i915_private *i915 = gt->i915; |
| 229 | int ret; |
| 230 | |
| 231 | GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct)); |
| 232 | |
| 233 | ret = i915_inject_probe_error(i915, -ENXIO); |
| 234 | if (ret) |
| 235 | return ret; |
| 236 | |
| 237 | ret = intel_guc_ct_enable(&guc->ct); |
| 238 | if (ret) |
| 239 | return ret; |
| 240 | |
| 241 | /* check for mmio messages received before/during the CT enable */ |
| 242 | guc_get_mmio_msg(guc); |
| 243 | guc_handle_mmio_msg(guc); |
| 244 | |
| 245 | intel_guc_enable_interrupts(guc); |
| 246 | |
| 247 | /* check for CT messages received before we enabled interrupts */ |
| 248 | spin_lock_irq(gt->irq_lock); |
| 249 | intel_guc_ct_event_handler(&guc->ct); |
| 250 | spin_unlock_irq(gt->irq_lock); |
| 251 | |
| 252 | drm_dbg(&i915->drm, "GuC communication enabled\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_DRIVER, "GuC communication enabled\n" ); |
| 253 | |
| 254 | return 0; |
| 255 | } |
| 256 | |
| 257 | static void guc_disable_communication(struct intel_guc *guc) |
| 258 | { |
| 259 | struct drm_i915_private *i915 = guc_to_gt(guc)->i915; |
| 260 | |
| 261 | /* |
| 262 | * Events generated during or after CT disable are logged by GuC |
| 263 | * via mmio. Make sure the register is clear before disabling CT since |
| 264 | * all events we cared about have already been processed via CT. |
| 265 | */ |
| 266 | guc_clear_mmio_msg(guc); |
| 267 | |
| 268 | intel_guc_disable_interrupts(guc); |
| 269 | |
| 270 | intel_guc_ct_disable(&guc->ct); |
| 271 | |
| 272 | /* |
| 273 | * Check for messages received during/after the CT disable. We do not |
| 274 | * expect any messages to have arrived via CT between the interrupt |
| 275 | * disable and the CT disable because GuC should've been idle until we |
| 276 | * triggered the CT disable protocol. |
| 277 | */ |
| 278 | guc_get_mmio_msg(guc); |
| 279 | |
| 280 | drm_dbg(&i915->drm, "GuC communication disabled\n")__drm_dev_dbg(((void *)0), (&i915->drm) ? (&i915-> drm)->dev : ((void *)0), DRM_UT_DRIVER, "GuC communication disabled\n" ); |
| 281 | } |
| 282 | |
| 283 | static void __uc_fetch_firmwares(struct intel_uc *uc) |
| 284 | { |
| 285 | int err; |
| 286 | |
| 287 | GEM_BUG_ON(!intel_uc_wants_guc(uc)); |
| 288 | |
| 289 | err = intel_uc_fw_fetch(&uc->guc.fw); |
| 290 | if (err) { |
| 291 | /* Make sure we transition out of transient "SELECTED" state */ |
| 292 | if (intel_uc_wants_huc(uc)) { |
| 293 | drm_dbg(&uc_to_gt(uc)->i915->drm, |
| 294 | "Failed to fetch GuC: %d disabling HuC\n", err); |
| 295 | intel_uc_fw_change_status(&uc->huc.fw, |
| 296 | INTEL_UC_FIRMWARE_ERROR); |
| 297 | } |
| 298 | |
| 299 | return; |
| 300 | } |
| 301 | |
| 302 | if (intel_uc_wants_huc(uc)) |
| 303 | intel_uc_fw_fetch(&uc->huc.fw); |
| 304 | } |
| 305 | |
| 306 | static void __uc_cleanup_firmwares(struct intel_uc *uc) |
| 307 | { |
| 308 | intel_uc_fw_cleanup_fetch(&uc->huc.fw); |
| 309 | intel_uc_fw_cleanup_fetch(&uc->guc.fw); |
| 310 | } |
| 311 | |
| 312 | static int __uc_init(struct intel_uc *uc) |
| 313 | { |
| 314 | struct intel_guc *guc = &uc->guc; |
| 315 | struct intel_huc *huc = &uc->huc; |
| 316 | int ret; |
| 317 | |
| 318 | GEM_BUG_ON(!intel_uc_wants_guc(uc)); |
| 319 | |
| 320 | if (!intel_uc_uses_guc(uc)) |
| 321 | return 0; |
| 322 | |
| 323 | if (i915_inject_probe_failure(uc_to_gt(uc)->i915)) |
| 324 | return -ENOMEM; |
| 325 | |
| 326 | ret = intel_guc_init(guc); |
| 327 | if (ret) |
| 328 | return ret; |
| 329 | |
| 330 | if (intel_uc_uses_huc(uc)) |
| 331 | intel_huc_init(huc); |
| 332 | |
| 333 | return 0; |
| 334 | } |
| 335 | |
| 336 | static void __uc_fini(struct intel_uc *uc) |
| 337 | { |
| 338 | intel_huc_fini(&uc->huc); |
| 339 | intel_guc_fini(&uc->guc); |
| 340 | } |
| 341 | |
| 342 | static int __uc_sanitize(struct intel_uc *uc) |
| 343 | { |
| 344 | struct intel_guc *guc = &uc->guc; |
| 345 | struct intel_huc *huc = &uc->huc; |
| 346 | |
| 347 | GEM_BUG_ON(!intel_uc_supports_guc(uc)); |
| 348 | |
| 349 | intel_huc_sanitize(huc); |
| 350 | intel_guc_sanitize(guc); |
| 351 | |
| 352 | return __intel_uc_reset_hw(uc); |
| 353 | } |
| 354 | |
| 355 | /* Initialize and verify the uC regs related to uC positioning in WOPCM */ |
| 356 | static int uc_init_wopcm(struct intel_uc *uc) |
| 357 | { |
| 358 | struct intel_gt *gt = uc_to_gt(uc); |
| 359 | struct intel_uncore *uncore = gt->uncore; |
| 360 | u32 base = intel_wopcm_guc_base(&gt->i915->wopcm); |
| 361 | u32 size = intel_wopcm_guc_size(&gt->i915->wopcm); |
| 362 | u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0; |
| 363 | u32 mask; |
| 364 | int err; |
| 365 | |
| 366 | if (unlikely(!base || !size)) { |
| 367 | i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n"); |
| 368 | return -E2BIG; |
| 369 | } |
| 370 | |
| 371 | GEM_BUG_ON(!intel_uc_supports_guc(uc)); |
| 372 | GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK)); |
| 373 | GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK); |
| 374 | GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK)); |
| 375 | GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK); |
| 376 | |
| 377 | err = i915_inject_probe_error(gt->i915, -ENXIO); |
| 378 | if (err) |
| 379 | return err; |
| 380 | |
| 381 | mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED; |
| 382 | err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask, |
| 383 | size | GUC_WOPCM_SIZE_LOCKED); |
| 384 | if (err) |
| 385 | goto err_out; |
| 386 | |
| 387 | mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent; |
| 388 | err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET, |
| 389 | base | huc_agent, mask, |
| 390 | base | huc_agent | |
| 391 | GUC_WOPCM_OFFSET_VALID); |
| 392 | if (err) |
| 393 | goto err_out; |
| 394 | |
| 395 | return 0; |
| 396 | |
| 397 | err_out: |
| 398 | i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n")__i915_printk(gt->i915, 0 ? "\0017" : "\0013", "Failed to init uC WOPCM registers!\n" ); |
| 399 | i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",__i915_printk(gt->i915, 0 ? "\0017" : "\0013", "%s(%#x)=%#x\n" , "DMA_GUC_WOPCM_OFFSET", i915_mmio_reg_offset(((const i915_reg_t ){ .reg = (0xc340) })), intel_uncore_read(uncore, ((const i915_reg_t ){ .reg = (0xc340) }))) |
| 400 | i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),__i915_printk(gt->i915, 0 ? "\0017" : "\0013", "%s(%#x)=%#x\n" , "DMA_GUC_WOPCM_OFFSET", i915_mmio_reg_offset(((const i915_reg_t ){ .reg = (0xc340) })), intel_uncore_read(uncore, ((const i915_reg_t ){ .reg = (0xc340) }))) |
| 401 | intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET))__i915_printk(gt->i915, 0 ? "\0017" : "\0013", "%s(%#x)=%#x\n" , "DMA_GUC_WOPCM_OFFSET", i915_mmio_reg_offset(((const i915_reg_t ){ .reg = (0xc340) })), intel_uncore_read(uncore, ((const i915_reg_t ){ .reg = (0xc340) }))); |
| 402 | i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",__i915_printk(gt->i915, 0 ? "\0017" : "\0013", "%s(%#x)=%#x\n" , "GUC_WOPCM_SIZE", i915_mmio_reg_offset(((const i915_reg_t){ .reg = (0xc050) })), intel_uncore_read(uncore, ((const i915_reg_t ){ .reg = (0xc050) }))) |
| 403 | i915_mmio_reg_offset(GUC_WOPCM_SIZE),__i915_printk(gt->i915, 0 ? "\0017" : "\0013", "%s(%#x)=%#x\n" , "GUC_WOPCM_SIZE", i915_mmio_reg_offset(((const i915_reg_t){ .reg = (0xc050) })), intel_uncore_read(uncore, ((const i915_reg_t ){ .reg = (0xc050) }))) |
| 404 | intel_uncore_read(uncore, GUC_WOPCM_SIZE))__i915_printk(gt->i915, 0 ? "\0017" : "\0013", "%s(%#x)=%#x\n" , "GUC_WOPCM_SIZE", i915_mmio_reg_offset(((const i915_reg_t){ .reg = (0xc050) })), intel_uncore_read(uncore, ((const i915_reg_t ){ .reg = (0xc050) }))); |
| 405 | |
| 406 | return err; |
| 407 | } |
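
Editor's note: the raw report's macro expansions also spell out the GuC register offsets and field masks used by the WOPCM setup above. Since the cleaned listing hides them, here they are collected in one place; values are copied from the expansions in the original listing, not independently checked against the hardware documentation:

/* GuC register offsets and fields as expanded in the raw report: */
#define GUC_STATUS			0xc000
#define   GS_MIA_IN_RESET		(0x01 << 0)
#define SOFT_SCRATCH(n)			(0xc180 + (n) * 4)
#define GUC_WOPCM_SIZE			0xc050
#define   GUC_WOPCM_SIZE_MASK		(0xfffff << 12)
#define   GUC_WOPCM_SIZE_LOCKED		(1 << 0)
#define DMA_GUC_WOPCM_OFFSET		0xc340
#define   GUC_WOPCM_OFFSET_MASK		(0x3ffff << 14)
#define   GUC_WOPCM_OFFSET_VALID	(1 << 0)
#define   HUC_LOADING_AGENT_GUC		(1 << 1)
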
| 408 | |
| 409 | static bool uc_is_wopcm_locked(struct intel_uc *uc) |
| 410 | { |
| 411 | struct intel_gt *gt = uc_to_gt(uc); |
| 412 | struct intel_uncore *uncore = gt->uncore; |
| 413 | |
| 414 | return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) || |
| 415 | (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID); |
| 416 | } |
| 417 | |
| 418 | static int __uc_check_hw(struct intel_uc *uc) |
| 419 | { |
| 420 | if (!intel_uc_supports_guc(uc)) |
| 421 | return 0; |
| 422 | |
| 423 | /* |
| 424 | * We can silently continue without GuC only if it was never enabled |
| 425 | * before on this system after reboot, otherwise we risk GPU hangs. |
| 426 | * To check whether GuC was loaded before, we look at the WOPCM registers. |
| 427 | */ |
| 428 | if (uc_is_wopcm_locked(uc)) |
| 429 | return -EIO; |
| 430 | |
| 431 | return 0; |
| 432 | } |
| 433 | |
| 434 | static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw) |
| 435 | { |
| 436 | struct drm_i915_private *i915 = uc_to_gt(uc)->i915; |
Value stored to 'i915' during its initialization is never read | |
| 437 | |
| 438 | drm_info(&i915->drm, "%s firmware %s version %u.%u.%u\n",do { } while(0) |
| 439 | intel_uc_fw_type_repr(fw->type), fw->file_selected.path,do { } while(0) |
| 440 | fw->file_selected.major_ver,do { } while(0) |
| 441 | fw->file_selected.minor_ver,do { } while(0) |
| 442 | fw->file_selected.patch_ver)do { } while(0); |
| 443 | } |
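
Editor's note on the flagged store: the raw report shows drm_info() expanding to an empty do { } while(0) in this configuration, so the i915 local written at source line 436 is never actually read, even though it appears used in the call above; that is exactly what the checker reports. Below is a minimal sketch of one possible rework, built only from identifiers already present in the listing; it is an illustration, not the upstream fix:

static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
{
	/* Pass the device pointer straight through instead of binding it to a
	 * local; when drm_info() compiles away, no dead store is left behind. */
	drm_info(&uc_to_gt(uc)->i915->drm, "%s firmware %s version %u.%u.%u\n",
		 intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
		 fw->file_selected.major_ver,
		 fw->file_selected.minor_ver,
		 fw->file_selected.patch_ver);
}

Alternatively, keeping the local but adding a (void)i915 cast, or a __maybe_unused annotation where the port provides that attribute, would also quiet the checker.
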
| 444 | |
| 445 | static int __uc_init_hw(struct intel_uc *uc) |
| 446 | { |
| 447 | struct drm_i915_private *i915 = uc_to_gt(uc)->i915; |
| 448 | struct intel_guc *guc = &uc->guc; |
| 449 | struct intel_huc *huc = &uc->huc; |
| 450 | int ret, attempts; |
| 451 | |
| 452 | GEM_BUG_ON(!intel_uc_supports_guc(uc)); |
| 453 | GEM_BUG_ON(!intel_uc_wants_guc(uc)); |
| 454 | |
| 455 | print_fw_ver(uc, &guc->fw); |
| 456 | |
| 457 | if (intel_uc_uses_huc(uc)) |
| 458 | print_fw_ver(uc, &huc->fw); |
| 459 | |
| 460 | if (!intel_uc_fw_is_loadable(&guc->fw)) { |
| 461 | ret = __uc_check_hw(uc) || |
| 462 | intel_uc_fw_is_overridden(&guc->fw) || |
| 463 | intel_uc_wants_guc_submission(uc) ? |
| 464 | intel_uc_fw_status_to_error(guc->fw.status) : 0; |
| 465 | goto err_out; |
| 466 | } |
| 467 | |
| 468 | ret = uc_init_wopcm(uc); |
| 469 | if (ret) |
| 470 | goto err_out; |
| 471 | |
| 472 | intel_guc_reset_interrupts(guc); |
| 473 | |
| 474 | /* WaEnableuKernelHeaderValidFix:skl */ |
| 475 | /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ |
| 476 | if (GRAPHICS_VER(i915) == 9) |
| 477 | attempts = 3; |
| 478 | else |
| 479 | attempts = 1; |
| 480 | |
| 481 | intel_rps_raise_unslice(&uc_to_gt(uc)->rps); |
| 482 | |
| 483 | while (attempts--) { |
| 484 | /* |
| 485 | * Always reset the GuC just before (re)loading, so |
| 486 | * that the state and timing are fairly predictable |
| 487 | */ |
| 488 | ret = __uc_sanitize(uc); |
| 489 | if (ret) |
| 490 | goto err_out; |
| 491 | |
| 492 | intel_huc_fw_upload(huc); |
| 493 | intel_guc_ads_reset(guc); |
| 494 | intel_guc_write_params(guc); |
| 495 | ret = intel_guc_fw_upload(guc); |
| 496 | if (ret == 0) |
| 497 | break; |
| 498 | |
| 499 | DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "___drm_dbg(((void *)0), DRM_UT_DRIVER, "GuC fw load failed: %d; will reset and " "retry %d more time(s)\n", ret, attempts) |
| 500 | "retry %d more time(s)\n", ret, attempts)___drm_dbg(((void *)0), DRM_UT_DRIVER, "GuC fw load failed: %d; will reset and " "retry %d more time(s)\n", ret, attempts); |
| 501 | } |
| 502 | |
| 503 | /* Did we succeed or run out of retries? */ |
| 504 | if (ret) |
| 505 | goto err_log_capture; |
| 506 | |
| 507 | ret = guc_enable_communication(guc); |
| 508 | if (ret) |
| 509 | goto err_log_capture; |
| 510 | |
| 511 | /* |
| 512 | * GSC-loaded HuC is authenticated by the GSC, so we don't need to |
| 513 | * trigger the auth here. However, given that the HuC loaded this way |
| 514 | * survives GT reset, we still need to update our SW bookkeeping to make |
| 515 | * sure it reflects the correct HW status. |
| 516 | */ |
| 517 | if (intel_huc_is_loaded_by_gsc(huc)) |
| 518 | intel_huc_update_auth_status(huc); |
| 519 | else |
| 520 | intel_huc_auth(huc); |
| 521 | |
| 522 | if (intel_uc_uses_guc_submission(uc)) |
| 523 | intel_guc_submission_enable(guc); |
| 524 | |
| 525 | if (intel_uc_uses_guc_slpc(uc)) { |
| 526 | ret = intel_guc_slpc_enable(&guc->slpc); |
| 527 | if (ret) |
| 528 | goto err_submission; |
| 529 | } else { |
| 530 | /* Restore GT back to RPn for non-SLPC path */ |
| 531 | intel_rps_lower_unslice(&uc_to_gt(uc)->rps); |
| 532 | } |
| 533 | |
| 534 | drm_info(&i915->drm, "GuC submission %s\n",do { } while(0) |
| 535 | str_enabled_disabled(intel_uc_uses_guc_submission(uc)))do { } while(0); |
| 536 | drm_info(&i915->drm, "GuC SLPC %s\n",do { } while(0) |
| 537 | str_enabled_disabled(intel_uc_uses_guc_slpc(uc)))do { } while(0); |
| 538 | |
| 539 | return 0; |
| 540 | |
| 541 | /* |
| 542 | * We've failed to load the firmware :( |
| 543 | */ |
| 544 | err_submission: |
| 545 | intel_guc_submission_disable(guc); |
| 546 | err_log_capture: |
| 547 | __uc_capture_load_err_log(uc); |
| 548 | err_out: |
| 549 | /* Return GT back to RPn */ |
| 550 | intel_rps_lower_unslice(&uc_to_gt(uc)->rps); |
| 551 | |
| 552 | __uc_sanitize(uc); |
| 553 | |
| 554 | if (!ret) { |
| 555 | drm_notice(&i915->drm, "GuC is uninitialized\n"); |
| 556 | /* We want to run without GuC submission */ |
| 557 | return 0; |
| 558 | } |
| 559 | |
| 560 | i915_probe_error(i915, "GuC initialization failed %d\n", ret)__i915_printk(i915, 0 ? "\0017" : "\0013", "GuC initialization failed %d\n" , ret); |
| 561 | |
| 562 | /* We want to keep KMS alive */ |
| 563 | return -EIO; |
| 564 | } |
| 565 | |
| 566 | static void __uc_fini_hw(struct intel_uc *uc) |
| 567 | { |
| 568 | struct intel_guc *guc = &uc->guc; |
| 569 | |
| 570 | if (!intel_guc_is_fw_running(guc)) |
| 571 | return; |
| 572 | |
| 573 | if (intel_uc_uses_guc_submission(uc)) |
| 574 | intel_guc_submission_disable(guc); |
| 575 | |
| 576 | __uc_sanitize(uc); |
| 577 | } |
| 578 | |
| 579 | /** |
| 580 | * intel_uc_reset_prepare - Prepare for reset |
| 581 | * @uc: the intel_uc structure |
| 582 | * |
| 583 | * Prepare for a full GPU reset. |
| 584 | */ |
| 585 | void intel_uc_reset_prepare(struct intel_uc *uc) |
| 586 | { |
| 587 | struct intel_guc *guc = &uc->guc; |
| 588 | |
| 589 | uc->reset_in_progress = true; |
| 590 | |
| 591 | /* Nothing to do if GuC isn't supported */ |
| 592 | if (!intel_uc_supports_guc(uc)) |
| 593 | return; |
| 594 | |
| 595 | /* Firmware expected to be running when this function is called */ |
| 596 | if (!intel_guc_is_ready(guc)) |
| 597 | goto sanitize; |
| 598 | |
| 599 | if (intel_uc_uses_guc_submission(uc)) |
| 600 | intel_guc_submission_reset_prepare(guc); |
| 601 | |
| 602 | sanitize: |
| 603 | __uc_sanitize(uc); |
| 604 | } |
| 605 | |
| 606 | void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled) |
| 607 | { |
| 608 | struct intel_guc *guc = &uc->guc; |
| 609 | |
| 610 | /* Firmware cannot be running when this function is called */ |
| 611 | if (intel_uc_uses_guc_submission(uc)) |
| 612 | intel_guc_submission_reset(guc, stalled); |
| 613 | } |
| 614 | |
| 615 | void intel_uc_reset_finish(struct intel_uc *uc) |
| 616 | { |
| 617 | struct intel_guc *guc = &uc->guc; |
| 618 | |
| 619 | uc->reset_in_progress = false; |
| 620 | |
| 621 | /* Firmware expected to be running when this function is called */ |
| 622 | if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc)) |
| 623 | intel_guc_submission_reset_finish(guc); |
| 624 | } |
| 625 | |
| 626 | void intel_uc_cancel_requests(struct intel_uc *uc) |
| 627 | { |
| 628 | struct intel_guc *guc = &uc->guc; |
| 629 | |
| 630 | /* Firmware cannot be running when this function is called */ |
| 631 | if (intel_uc_uses_guc_submission(uc)) |
| 632 | intel_guc_submission_cancel_requests(guc); |
| 633 | } |
| 634 | |
| 635 | void intel_uc_runtime_suspend(struct intel_uc *uc) |
| 636 | { |
| 637 | struct intel_guc *guc = &uc->guc; |
| 638 | |
| 639 | if (!intel_guc_is_ready(guc)) |
| 640 | return; |
| 641 | |
| 642 | /* |
| 643 | * Wait for any outstanding CTB before tearing down communication with the |
| 644 | * GuC. |
| 645 | */ |
| 646 | #define OUTSTANDING_CTB_TIMEOUT_PERIOD (HZ / 5) |
| 647 | intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h, |
| 648 | false, OUTSTANDING_CTB_TIMEOUT_PERIOD); |
| 649 | GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h)); |
| 650 | |
| 651 | guc_disable_communication(guc); |
| 652 | } |
| 653 | |
| 654 | void intel_uc_suspend(struct intel_uc *uc) |
| 655 | { |
| 656 | struct intel_guc *guc = &uc->guc; |
| 657 | intel_wakeref_t wakeref; |
| 658 | int err; |
| 659 | |
| 660 | if (!intel_guc_is_ready(guc)) |
| 661 | return; |
| 662 | |
| 663 | with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) { |
| 664 | err = intel_guc_suspend(guc); |
| 665 | if (err) |
| 666 | DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err)___drm_dbg(((void *)0), DRM_UT_DRIVER, "Failed to suspend GuC, err=%d" , err); |
| 667 | } |
| 668 | } |
| 669 | |
| 670 | static int __uc_resume(struct intel_uc *uc, bool enable_communication) |
| 671 | { |
| 672 | struct intel_guc *guc = &uc->guc; |
| 673 | struct intel_gt *gt = guc_to_gt(guc); |
| 674 | int err; |
| 675 | |
| 676 | if (!intel_guc_is_fw_running(guc)) |
| 677 | return 0; |
| 678 | |
| 679 | /* Make sure we enable communication if and only if it's disabled */ |
| 680 | GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct)); |
| 681 | |
| 682 | if (enable_communication) |
| 683 | guc_enable_communication(guc); |
| 684 | |
| 685 | /* If we are only resuming GuC communication but not reloading |
| 686 | * GuC, we need to ensure the ARAT timer interrupt is enabled |
| 687 | * again. In case of GuC reload, it is enabled during SLPC enable. |
| 688 | */ |
| 689 | if (enable_communication && intel_uc_uses_guc_slpc(uc)) |
| 690 | intel_guc_pm_intrmsk_enable(gt); |
| 691 | |
| 692 | err = intel_guc_resume(guc); |
| 693 | if (err) { |
| 694 | DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err)___drm_dbg(((void *)0), DRM_UT_DRIVER, "Failed to resume GuC, err=%d" , err); |
| 695 | return err; |
| 696 | } |
| 697 | |
| 698 | return 0; |
| 699 | } |
| 700 | |
| 701 | int intel_uc_resume(struct intel_uc *uc) |
| 702 | { |
| 703 | /* |
| 704 | * When coming out of S3/S4 we sanitize and re-init the HW, so |
| 705 | * communication is already re-enabled at this point. |
| 706 | */ |
| 707 | return __uc_resume(uc, false); |
| 708 | } |
| 709 | |
| 710 | int intel_uc_runtime_resume(struct intel_uc *uc) |
| 711 | { |
| 712 | /* |
| 713 | * During runtime resume we don't sanitize, so we need to re-init |
| 714 | * communication as well. |
| 715 | */ |
| 716 | return __uc_resume(uc, true); |
| 717 | } |
| 718 | |
| 719 | static const struct intel_uc_ops uc_ops_off = { |
| 720 | .init_hw = __uc_check_hw, |
| 721 | }; |
| 722 | |
| 723 | static const struct intel_uc_ops uc_ops_on = { |
| 724 | .sanitize = __uc_sanitize, |
| 725 | |
| 726 | .init_fw = __uc_fetch_firmwares, |
| 727 | .fini_fw = __uc_cleanup_firmwares, |
| 728 | |
| 729 | .init = __uc_init, |
| 730 | .fini = __uc_fini, |
| 731 | |
| 732 | .init_hw = __uc_init_hw, |
| 733 | .fini_hw = __uc_fini_hw, |
| 734 | }; |