| File: | dev/pci/drm/i915/gt/uc/intel_guc_slpc.c |
| Warning: | line 414, column 6 Branch condition evaluates to a garbage value |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
| 1 | // SPDX-License-Identifier: MIT | |||
| 2 | /* | |||
| 3 | * Copyright © 2021 Intel Corporation | |||
| 4 | */ | |||
| 5 | ||||
| 6 | #include <drm/drm_cache.h> | |||
| 7 | #include <linux/string_helpers.h> | |||
| 8 | ||||
| 9 | #include "i915_drv.h" | |||
| 10 | #include "i915_reg.h" | |||
| 11 | #include "intel_guc_slpc.h" | |||
| 12 | #include "intel_mchbar_regs.h" | |||
| 13 | #include "gt/intel_gt.h" | |||
| 14 | #include "gt/intel_gt_regs.h" | |||
| 15 | #include "gt/intel_rps.h" | |||
| 16 | ||||
/* Map an SLPC instance back to its containing GuC structure. */
static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
	return container_of(slpc, struct intel_guc, slpc);
}
| 21 | ||||
/* GT that owns the GuC this SLPC instance is embedded in. */
static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	return guc_to_gt(slpc_to_guc(slpc));
}
| 26 | ||||
/* Top-level i915 device for this SLPC instance. */
static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
{
	return slpc_to_gt(slpc)->i915;
}
| 31 | ||||
/*
 * SLPC is only usable when GuC submission is supported on this
 * platform and the hardware is Gen12 or newer.
 */
static bool __detect_slpc_supported(struct intel_guc *guc)
{
	/* GuC SLPC is unavailable for pre-Gen12 */
	return guc->submission_supported &&
		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}
| 38 | ||||
| 39 | static bool_Bool __guc_slpc_selected(struct intel_guc *guc) | |||
| 40 | { | |||
| 41 | if (!intel_guc_slpc_is_supported(guc)) | |||
| 42 | return false0; | |||
| 43 | ||||
| 44 | return guc->submission_selected; | |||
| 45 | } | |||
| 46 | ||||
/*
 * Cache SLPC support/selection state early in driver load, before
 * any communication with the GuC is possible.
 */
void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	slpc->supported = __detect_slpc_supported(guc);
	slpc->selected = __guc_slpc_selected(guc);
}
| 54 | ||||
| 55 | static void slpc_mem_set_param(struct slpc_shared_data *data, | |||
| 56 | u32 id, u32 value) | |||
| 57 | { | |||
| 58 | GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS)((void)0); | |||
| 59 | /* | |||
| 60 | * When the flag bit is set, corresponding value will be read | |||
| 61 | * and applied by SLPC. | |||
| 62 | */ | |||
| 63 | data->override_params.bits[id >> 5] |= (1 << (id % 32)); | |||
| 64 | data->override_params.values[id] = value; | |||
| 65 | } | |||
| 66 | ||||
/* Mark an SLPC task as enabled via the shared-memory override params. */
static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	/*
	 * Enabling a param involves setting the enable_id
	 * to 1 and disable_id to 0.
	 */
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}
| 77 | ||||
/* Mark an SLPC task as disabled via the shared-memory override params. */
static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	/*
	 * Disabling a param involves setting the enable_id
	 * to 0 and disable_id to 1.
	 */
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}
| 88 | ||||
/*
 * Read the current SLPC global state from the GuC-shared page.
 * The first dword is flushed so the CPU observes GuC's latest write.
 */
static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data;

	GEM_BUG_ON(!slpc->vma);

	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
	data = slpc->vaddr;

	return data->header.global_state;
}
| 100 | ||||
/*
 * Send a non-blocking PARAMETER_SET H2G request to GuC.
 * A positive return from intel_guc_send_nb() is an unexpected reply
 * and is mapped to -EPROTO; negative errors pass through unchanged.
 */
static int guc_action_slpc_set_param_nb(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send_nb(guc, request, ARRAY_SIZE(request), 0);

	return ret > 0 ? -EPROTO : ret;
}
| 115 | ||||
/* Non-blocking variant of slpc_set_param(); used where stalls must be avoided. */
static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	return guc_action_slpc_set_param_nb(guc, id, value);
}
| 124 | ||||
/*
 * Send a blocking PARAMETER_SET H2G request to GuC.
 * Positive replies are protocol errors (-EPROTO); negative errors
 * pass through unchanged.
 */
static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}
| 139 | ||||
/* True once GuC reports SLPC is in the RUNNING global state. */
static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}
| 144 | ||||
/*
 * Ask GuC to dump its current task state into the shared buffer at
 * GGTT @offset. Positive replies are protocol errors (-EPROTO).
 */
static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}
| 159 | ||||
/*
 * Request a fresh snapshot of SLPC task state into the shared page,
 * then flush the page so subsequent CPU reads see GuC's data.
 * Returns 0 on success, negative error code on failure.
 */
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_query(guc, offset);
	if (unlikely(ret))
		i915_probe_error(i915, "Failed to query task state (%pe)\n",
				 ERR_PTR(ret));

	/* Flush unconditionally to keep the CPU view of the page coherent. */
	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

	return ret;
}
| 176 | ||||
/*
 * Blocking SLPC parameter set with probe-error logging.
 * @id must be a valid SLPC parameter (< SLPC_MAX_PARAM).
 */
static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	ret = guc_action_slpc_set_param(guc, id, value);
	if (ret)
		i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
				 id, value, ERR_PTR(ret));

	return ret;
}
| 192 | ||||
/*
 * Temporarily force the SLPC minimum frequency to @freq (MHz) using a
 * non-blocking request. Caller must hold slpc->lock and is responsible
 * for bounds checking; the softlimit is deliberately not updated here.
 */
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	lockdep_assert_held(&slpc->lock);

	if (!intel_guc_is_ready(guc))
		return -ENODEV;

	/*
	 * This function is a little different as compared to
	 * intel_guc_slpc_set_min_freq(). Softlimit will not be updated
	 * here since this is used to temporarily change min freq,
	 * for example, during a waitboost. Caller is responsible for
	 * checking bounds.
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Non-blocking request will avoid stalls */
		ret = slpc_set_param_nb(slpc,
					SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					freq);
		if (ret)
			drm_notice(&i915->drm,
				   "Failed to send set_param for min freq(%d): (%d)\n",
				   freq, ret);
	}

	return ret;
}
| 226 | ||||
/*
 * Deferred waitboost: raise the SLPC min frequency to the boost value.
 * Only applied while there are still active waiters; bumps the boost
 * counter on success.
 */
static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
	int err;

	/*
	 * Raise min freq to boost. It's possible that
	 * this is greater than current max. But it will
	 * certainly be limited by RP0. An error setting
	 * the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_read(&slpc->num_waiters)) {
		err = slpc_force_min_freq(slpc, slpc->boost_freq);
		if (!err)
			slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}
| 246 | ||||
/*
 * Allocate and map the GuC-shared SLPC data page, and initialize
 * host-side SLPC state (softlimits, boost bookkeeping, lock, work item).
 * Returns 0 on success, negative error code on allocation failure.
 */
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		i915_probe_error(i915,
				 "Failed to allocate SLPC struct (err=%pe)\n",
				 ERR_PTR(err));
		return err;
	}

	/* Zero softlimits: slpc_set_softlimits() will pick platform defaults. */
	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;

	slpc->boost_freq = 0;
	atomic_set(&slpc->num_waiters, 0);
	slpc->num_boosts = 0;
	slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;

	mutex_init(&slpc->lock);
	INIT_WORK(&slpc->boost_work, slpc_boost_work);

	return err;
}
| 277 | ||||
| 278 | static const char *slpc_global_state_to_string(enum slpc_global_state state) | |||
| 279 | { | |||
| 280 | switch (state) { | |||
| 281 | case SLPC_GLOBAL_STATE_NOT_RUNNING: | |||
| 282 | return "not running"; | |||
| 283 | case SLPC_GLOBAL_STATE_INITIALIZING: | |||
| 284 | return "initializing"; | |||
| 285 | case SLPC_GLOBAL_STATE_RESETTING: | |||
| 286 | return "resetting"; | |||
| 287 | case SLPC_GLOBAL_STATE_RUNNING: | |||
| 288 | return "running"; | |||
| 289 | case SLPC_GLOBAL_STATE_SHUTTING_DOWN: | |||
| 290 | return "shutting down"; | |||
| 291 | case SLPC_GLOBAL_STATE_ERROR: | |||
| 292 | return "error"; | |||
| 293 | default: | |||
| 294 | return "unknown"; | |||
| 295 | } | |||
| 296 | } | |||
| 297 | ||||
/* Convenience wrapper: current SLPC state as a printable string. */
static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}
| 302 | ||||
/*
 * Send the SLPC RESET event, pointing GuC at the shared data buffer at
 * GGTT @offset. Positive replies are protocol errors (-EPROTO).
 */
static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}
| 317 | ||||
/*
 * Issue the SLPC reset event and wait (up to SLPC_RESET_TIMEOUT_MS)
 * for SLPC to report the RUNNING state. Returns 0 on success,
 * -EIO on timeout, or the negative send error.
 */
static int slpc_reset(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_reset(guc, offset);

	if (unlikely(ret < 0)) {
		i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	if (!ret) {
		/* Poll until GuC reports SLPC running, or give up. */
		if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
			i915_probe_error(i915, "SLPC not enabled! State = %s\n",
					 slpc_get_state_string(slpc));
			return -EIO;
		}
	}

	return 0;
}
| 343 | ||||
/*
 * Decode the min frequency field from the shared task-state data and
 * convert it from hardware units to MHz.
 */
static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}
| 354 | ||||
/*
 * Decode the max frequency field from the shared task-state data and
 * convert it from hardware units to MHz.
 */
static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}
| 365 | ||||
/*
 * Reinitialize the GuC-shared data block to its default contents:
 * zeroed, sized header, GTPERF task enabled, BALANCER and DCC disabled.
 */
static void slpc_shared_data_reset(struct slpc_shared_data *data)
{
	memset(data, 0, sizeof(struct slpc_shared_data));

	data->header.size = sizeof(struct slpc_shared_data);

	/* Enable only GTPERF task, disable others */
	slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
			     SLPC_PARAM_TASK_DISABLE_GTPERF);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
			      SLPC_PARAM_TASK_DISABLE_BALANCER);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
			      SLPC_PARAM_TASK_DISABLE_DCC);
}
| 382 | ||||
| 383 | /** | |||
| 384 | * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC. | |||
| 385 | * @slpc: pointer to intel_guc_slpc. | |||
| 386 | * @val: frequency (MHz) | |||
| 387 | * | |||
| 388 | * This function will invoke GuC SLPC action to update the max frequency | |||
| 389 | * limit for unslice. | |||
| 390 | * | |||
| 391 | * Return: 0 on success, non-zero error code on failure. | |||
| 392 | */ | |||
| 393 | int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val) | |||
| 394 | { | |||
| 395 | struct drm_i915_privateinteldrm_softc *i915 = slpc_to_i915(slpc); | |||
| 396 | intel_wakeref_t wakeref; | |||
| 397 | int ret; | |||
| 398 | ||||
| 399 | if (val < slpc->min_freq || | |||
| 400 | val > slpc->rp0_freq || | |||
| 401 | val < slpc->min_freq_softlimit) | |||
| 402 | return -EINVAL22; | |||
| 403 | ||||
| 404 | with_intel_runtime_pm(&i915->runtime_pm, wakeref)for ((wakeref) = intel_runtime_pm_get(&i915->runtime_pm ); (wakeref); intel_runtime_pm_put((&i915->runtime_pm) , (wakeref)), (wakeref) = 0) { | |||
| 405 | ret = slpc_set_param(slpc, | |||
| 406 | SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ, | |||
| 407 | val); | |||
| 408 | ||||
| 409 | /* Return standardized err code for sysfs calls */ | |||
| 410 | if (ret) | |||
| 411 | ret = -EIO5; | |||
| 412 | } | |||
| 413 | ||||
| 414 | if (!ret) | |||
| ||||
| 415 | slpc->max_freq_softlimit = val; | |||
| 416 | ||||
| 417 | return ret; | |||
| 418 | } | |||
| 419 | ||||
/**
 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold max frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_max_freq(slpc);
	}

	return ret;
}
| 446 | ||||
/**
 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the min unslice
 * frequency.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	/* Reject values outside [min_freq, rp0] or above the max softlimit. */
	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val > slpc->max_freq_softlimit)
		return -EINVAL;

	/* Need a lock now since waitboost can be modifying min as well */
	mutex_lock(&slpc->lock);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Ignore efficient freq if lower min freq is requested */
	ret = slpc_set_param(slpc,
			     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			     val < slpc->rp1_freq);
	if (ret) {
		i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
				 ERR_PTR(ret));
		goto out;
	}

	ret = slpc_set_param(slpc,
			     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
			     val);

	if (!ret)
		slpc->min_freq_softlimit = val;

out:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&slpc->lock);

	/* Return standardized err code for sysfs calls */
	if (ret)
		ret = -EIO;

	return ret;
}
| 499 | ||||
/**
 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold min frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the min frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_min_freq(slpc);
	}

	return ret;
}
| 526 | ||||
/*
 * Program the SLPC media frequency ratio mode. Returns -ENODEV on
 * platforms without media ratio mode support, otherwise the result
 * of the parameter set.
 */
int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	if (!HAS_MEDIA_RATIO_MODE(i915))
		return -ENODEV;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_MEDIA_FF_RATIO_MODE,
				     val);
	return ret;
}
| 542 | ||||
/*
 * Unmask the ARAT timer expiry interrupt in GEN6_PMINTRMSK so GuC
 * can receive it (the bit is must-be-zero for GuC delivery).
 */
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
	u32 pm_intrmsk_mbz = 0;

	/*
	 * Allow GuC to receive ARAT timer expiry event.
	 * This interrupt register is setup by RPS code
	 * when host based Turbo is enabled.
	 */
	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;

	intel_uncore_rmw(gt->uncore,
			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
}
| 557 | ||||
/*
 * Apply host-side softlimits to SLPC. Zeroed softlimits mean "use
 * platform defaults" (RP0 for max, SLPC's own min for min); non-zero
 * softlimits that deviate from defaults are pushed back to GuC.
 */
static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	/*
	 * Softlimits are initially equivalent to platform limits
	 * unless they have deviated from defaults, in which case,
	 * we retain the values and set min/max accordingly.
	 */
	if (!slpc->max_freq_softlimit) {
		slpc->max_freq_softlimit = slpc->rp0_freq;
		slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
	} else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);
	}

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit) {
		/* No cached limit: query SLPC for its current minimum. */
		ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
		if (unlikely(ret))
			return ret;
		slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
	} else {
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);
	}

	return 0;
}
| 590 | ||||
/* Clamp the SLPC max limit to the fused platform RP0 frequency. */
static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to used platform rp0 */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}
| 598 | ||||
/*
 * Cache the platform RP0/RP1/min frequencies (in MHz) from the RPS
 * frequency caps; default the boost frequency to RP0 if unset.
 */
static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
	struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
	struct intel_rps_freq_caps caps;

	gen6_rps_get_freq_caps(rps, &caps);
	slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
	slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
	slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);

	if (!slpc->boost_freq)
		slpc->boost_freq = slpc->rp0_freq;
}
| 612 | ||||
/*
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending reset event to GuC SLPC. Initial data is setup in
 * intel_guc_slpc_init. Here we send the reset event. We do
 * not currently need a slpc_disable since this is taken care
 * of automatically when a reset/suspend occurs and the GuC
 * CTB is destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	/* Reinitialize shared data before kicking off the reset event. */
	slpc_shared_data_reset(slpc->vaddr);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(to_gt(i915));

	slpc_get_rp_values(slpc);

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	/* Set cached media freq ratio mode */
	intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);

	return 0;
}
| 671 | ||||
| 672 | int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val) | |||
| 673 | { | |||
| 674 | int ret = 0; | |||
| 675 | ||||
| 676 | if (val < slpc->min_freq || val > slpc->rp0_freq) | |||
| 677 | return -EINVAL22; | |||
| 678 | ||||
| 679 | mutex_lock(&slpc->lock)rw_enter_write(&slpc->lock); | |||
| 680 | ||||
| 681 | if (slpc->boost_freq != val) { | |||
| 682 | /* Apply only if there are active waiters */ | |||
| 683 | if (atomic_read(&slpc->num_waiters)({ typeof(*(&slpc->num_waiters)) __tmp = *(volatile typeof (*(&slpc->num_waiters)) *)&(*(&slpc->num_waiters )); membar_datadep_consumer(); __tmp; })) { | |||
| 684 | ret = slpc_force_min_freq(slpc, val); | |||
| 685 | if (ret) { | |||
| 686 | ret = -EIO5; | |||
| 687 | goto done; | |||
| 688 | } | |||
| 689 | } | |||
| 690 | ||||
| 691 | slpc->boost_freq = val; | |||
| 692 | } | |||
| 693 | ||||
| 694 | done: | |||
| 695 | mutex_unlock(&slpc->lock)rw_exit_write(&slpc->lock); | |||
| 696 | return ret; | |||
| 697 | } | |||
| 698 | ||||
| 699 | void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc) | |||
| 700 | { | |||
| 701 | /* | |||
| 702 | * Return min back to the softlimit. | |||
| 703 | * This is called during request retire, | |||
| 704 | * so we don't need to fail that if the | |||
| 705 | * set_param fails. | |||
| 706 | */ | |||
| 707 | mutex_lock(&slpc->lock)rw_enter_write(&slpc->lock); | |||
| 708 | if (atomic_dec_and_test(&slpc->num_waiters)(__sync_sub_and_fetch((&slpc->num_waiters), 1) == 0)) | |||
| 709 | slpc_force_min_freq(slpc, slpc->min_freq_softlimit); | |||
| 710 | mutex_unlock(&slpc->lock)rw_exit_write(&slpc->lock); | |||
| 711 | } | |||
| 712 | ||||
| 713 | int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p) | |||
| 714 | { | |||
| 715 | struct drm_i915_privateinteldrm_softc *i915 = slpc_to_i915(slpc); | |||
| 716 | struct slpc_shared_data *data = slpc->vaddr; | |||
| 717 | struct slpc_task_state_data *slpc_tasks; | |||
| 718 | intel_wakeref_t wakeref; | |||
| 719 | int ret = 0; | |||
| 720 | ||||
| 721 | GEM_BUG_ON(!slpc->vma)((void)0); | |||
| 722 | ||||
| 723 | with_intel_runtime_pm(&i915->runtime_pm, wakeref)for ((wakeref) = intel_runtime_pm_get(&i915->runtime_pm ); (wakeref); intel_runtime_pm_put((&i915->runtime_pm) , (wakeref)), (wakeref) = 0) { | |||
| 724 | ret = slpc_query_task_state(slpc); | |||
| 725 | ||||
| 726 | if (!ret) { | |||
| 727 | slpc_tasks = &data->task_state_data; | |||
| 728 | ||||
| 729 | drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc)); | |||
| 730 | drm_printf(p, "\tGTPERF task active: %s\n", | |||
| 731 | str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED((u32)((1UL << (0)) + 0)))); | |||
| 732 | drm_printf(p, "\tMax freq: %u MHz\n", | |||
| 733 | slpc_decode_max_freq(slpc)); | |||
| 734 | drm_printf(p, "\tMin freq: %u MHz\n", | |||
| 735 | slpc_decode_min_freq(slpc)); | |||
| 736 | drm_printf(p, "\twaitboosts: %u\n", | |||
| 737 | slpc->num_boosts); | |||
| 738 | } | |||
| 739 | } | |||
| 740 | ||||
| 741 | return ret; | |||
| 742 | } | |||
| 743 | ||||
| 744 | void intel_guc_slpc_fini(struct intel_guc_slpc *slpc) | |||
| 745 | { | |||
| 746 | if (!slpc->vma) | |||
| 747 | return; | |||
| 748 | ||||
| 749 | i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP(1UL << (0))); | |||
| 750 | } |