| File: | dev/pci/drm/i915/intel_runtime_pm.c |
| Warning: | line 498, column 17: Value stored to 'kdev' during its initialization is never read |
| 1 | /* |
| 2 | * Copyright © 2012-2014 Intel Corporation |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice (including the next |
| 12 | * paragraph) shall be included in all copies or substantial portions of the |
| 13 | * Software. |
| 14 | * |
| 15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| 20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
| 21 | * IN THE SOFTWARE. |
| 22 | * |
| 23 | * Authors: |
| 24 | * Eugeni Dodonov <eugeni.dodonov@intel.com> |
| 25 | * Daniel Vetter <daniel.vetter@ffwll.ch> |
| 26 | * |
| 27 | */ |
| 28 | |
| 29 | #include <linux/pm_runtime.h> |
| 30 | |
| 31 | #include <drm/drm_print.h> |
| 32 | |
| 33 | #include "i915_drv.h" |
| 34 | #include "i915_trace.h" |
| 35 | |
| 36 | /** |
| 37 | * DOC: runtime pm |
| 38 | * |
| 39 | * The i915 driver supports dynamic enabling and disabling of entire hardware |
| 40 | * blocks at runtime. This is especially important on the display side where |
| 41 | * software is supposed to control many power gates manually on recent hardware, |
| 42 | * since on the GT side a lot of the power management is done by the hardware. |
| 43 | * But even there some manual control at the device level is required. |
| 44 | * |
| 45 | * Since i915 supports a diverse set of platforms with a unified codebase and |
| 46 | * hardware engineers just love to shuffle functionality around between power |
| 47 | * domains there's a sizeable amount of indirection required. This file provides |
| 48 | * generic functions to the driver for grabbing and releasing references for |
| 49 | * abstract power domains. It then maps those to the actual power wells |
| 50 | * present for a given platform. |
| 51 | */ |
| 52 | |
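As a sketch of the pattern the comment above describes (the wrapper function is hypothetical; only the intel_runtime_pm_get()/intel_runtime_pm_put() pair is the API implemented in this file):

    /* Hypothetical caller, illustrating the acquire/release discipline. */
    static void example_touch_hw(struct drm_i915_private *i915)
    {
            intel_wakeref_t wakeref;

            wakeref = intel_runtime_pm_get(&i915->runtime_pm); /* device now powered */
            /* ... safely access the hardware here ... */
            intel_runtime_pm_put(&i915->runtime_pm, wakeref);  /* symmetric release */
    }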
| 53 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) |
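| Note: | In the analyzed build, the analyzer's macro expansion shows IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) evaluating to 0, so the wakeref-tracking code below is compiled out and the no-op stubs after the #else are used instead (the expansions later in the file show CONFIG_PM disabled the same way). |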
| 54 | |
| 55 | #include <linux/sort.h> |
| 56 | |
| 57 | #define STACKDEPTH 8 |
| 58 | |
| 59 | static noinline depot_stack_handle_t __save_depot_stack(void) |
| 60 | { |
| 61 | unsigned long entries[STACKDEPTH]; |
| 62 | unsigned int n; |
| 63 | |
| 64 | n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); |
| 65 | return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN); |
| 66 | } |
| 67 | |
| 68 | static void __print_depot_stack(depot_stack_handle_t stack, |
| 69 | char *buf, int sz, int indent) |
| 70 | { |
| 71 | unsigned long *entries; |
| 72 | unsigned int nr_entries; |
| 73 | |
| 74 | nr_entries = stack_depot_fetch(stack, &entries); |
| 75 | stack_trace_snprint(buf, sz, entries, nr_entries, indent); |
| 76 | } |
| 77 | |
| 78 | static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) |
| 79 | { |
| 80 | mtx_init(&rpm->debug.lock, IPL_TTY); |
| 81 | } |
| 82 | |
| 83 | static noinline depot_stack_handle_t |
| 84 | track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) |
| 85 | { |
| 86 | depot_stack_handle_t stack, *stacks; |
| 87 | unsigned long flags; |
| 88 | |
| 89 | if (!rpm->available) |
| 90 | return -1; |
| 91 | |
| 92 | stack = __save_depot_stack(); |
| 93 | if (!stack) |
| 94 | return -1; |
| 95 | |
| 96 | spin_lock_irqsave(&rpm->debug.lock, flags); |
| 97 | |
| 98 | if (!rpm->debug.count) |
| 99 | rpm->debug.last_acquire = stack; |
| 100 | |
| 101 | stacks = krealloc(rpm->debug.owners, |
| 102 | (rpm->debug.count + 1) * sizeof(*stacks), |
| 103 | GFP_NOWAIT | __GFP_NOWARN); |
| 104 | if (stacks) { |
| 105 | stacks[rpm->debug.count++] = stack; |
| 106 | rpm->debug.owners = stacks; |
| 107 | } else { |
| 108 | stack = -1; |
| 109 | } |
| 110 | |
| 111 | spin_unlock_irqrestore(&rpm->debug.lock, flags); |
| 112 | |
| 113 | return stack; |
| 114 | } |
| 115 | |
| 116 | static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, |
| 117 | depot_stack_handle_t stack) |
| 118 | { |
| 119 | struct drm_i915_private *i915 = container_of(rpm, |
| 120 | struct drm_i915_private, |
| 121 | runtime_pm); |
| 122 | unsigned long flags, n; |
| 123 | bool found = false; |
| 124 | |
| 125 | if (unlikely(stack == -1)) |
| 126 | return; |
| 127 | |
| 128 | spin_lock_irqsave(&rpm->debug.lock, flags); |
| 129 | for (n = rpm->debug.count; n--; ) { |
| 130 | if (rpm->debug.owners[n] == stack) { |
| 131 | memmove(rpm->debug.owners + n, |
| 132 | rpm->debug.owners + n + 1, |
| 133 | (--rpm->debug.count - n) * sizeof(stack)); |
| 134 | found = true; |
| 135 | break; |
| 136 | } |
| 137 | } |
| 138 | spin_unlock_irqrestore(&rpm->debug.lock, flags); |
| 139 | |
| 140 | if (drm_WARN(&i915->drm, !found, |
| 141 | "Unmatched wakeref (tracking %lu), count %u\n", |
| 142 | rpm->debug.count, atomic_read(&rpm->wakeref_count))) { |
| 143 | char *buf; |
| 144 | |
| 145 | buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN); |
| 146 | if (!buf) |
| 147 | return; |
| 148 | |
| 149 | __print_depot_stack(stack, buf, PAGE_SIZE, 2); |
| 150 | DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf); |
| 151 | |
| 152 | stack = READ_ONCE(rpm->debug.last_release); |
| 153 | if (stack) { |
| 154 | __print_depot_stack(stack, buf, PAGE_SIZE, 2); |
| 155 | DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf); |
| 156 | } |
| 157 | |
| 158 | kfree(buf); |
| 159 | } |
| 160 | } |
| 161 | |
| 162 | static int cmphandle(const void *_a, const void *_b) |
| 163 | { |
| 164 | const depot_stack_handle_t * const a = _a, * const b = _b; |
| 165 | |
| 166 | if (*a < *b) |
| 167 | return -1; |
| 168 | else if (*a > *b) |
| 169 | return 1; |
| 170 | else |
| 171 | return 0; |
| 172 | } |
| 173 | |
| 174 | static void |
| 175 | __print_intel_runtime_pm_wakeref(struct drm_printer *p, |
| 176 | const struct intel_runtime_pm_debug *dbg) |
| 177 | { |
| 178 | unsigned long i; |
| 179 | char *buf; |
| 180 | |
| 181 | buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN); |
| 182 | if (!buf) |
| 183 | return; |
| 184 | |
| 185 | if (dbg->last_acquire) { |
| 186 | __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2); |
| 187 | drm_printf(p, "Wakeref last acquired:\n%s", buf); |
| 188 | } |
| 189 | |
| 190 | if (dbg->last_release) { |
| 191 | __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2); |
| 192 | drm_printf(p, "Wakeref last released:\n%s", buf); |
| 193 | } |
| 194 | |
| 195 | drm_printf(p, "Wakeref count: %lu\n", dbg->count); |
| 196 | |
| 197 | sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL); |
| 198 | |
| 199 | for (i = 0; i < dbg->count; i++) { |
| 200 | depot_stack_handle_t stack = dbg->owners[i]; |
| 201 | unsigned long rep; |
| 202 | |
| 203 | rep = 1; |
| 204 | while (i + 1 < dbg->count && dbg->owners[i + 1] == stack) |
| 205 | rep++, i++; |
| 206 | __print_depot_stack(stack, buf, PAGE_SIZE, 2); |
| 207 | drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf); |
| 208 | } |
| 209 | |
| 210 | kfree(buf); |
| 211 | } |
| 212 | |
| 213 | static noinline void |
| 214 | __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug, |
| 215 | struct intel_runtime_pm_debug *saved) |
| 216 | { |
| 217 | *saved = *debug; |
| 218 | |
| 219 | debug->owners = NULL; |
| 220 | debug->count = 0; |
| 221 | debug->last_release = __save_depot_stack(); |
| 222 | } |
| 223 | |
| 224 | static void |
| 225 | dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) |
| 226 | { |
| 227 | if (debug->count) { |
| 228 | struct drm_printer p = drm_debug_printer("i915"); |
| 229 | |
| 230 | __print_intel_runtime_pm_wakeref(&p, debug); |
| 231 | } |
| 232 | |
| 233 | kfree(debug->owners); |
| 234 | } |
| 235 | |
| 236 | static noinline void |
| 237 | __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm) |
| 238 | { |
| 239 | struct intel_runtime_pm_debug dbg = {}; |
| 240 | unsigned long flags; |
| 241 | |
| 242 | if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count, |
| 243 | &rpm->debug.lock, |
| 244 | flags)) |
| 245 | return; |
| 246 | |
| 247 | __untrack_all_wakerefs(&rpm->debug, &dbg); |
| 248 | spin_unlock_irqrestore(&rpm->debug.lock, flags); |
| 249 | |
| 250 | dump_and_free_wakeref_tracking(&dbg); |
| 251 | } |
| 252 | |
| 253 | static noinline void |
| 254 | untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm) |
| 255 | { |
| 256 | struct intel_runtime_pm_debug dbg = {}; |
| 257 | unsigned long flags; |
| 258 | |
| 259 | spin_lock_irqsave(&rpm->debug.lock, flags); |
| 260 | __untrack_all_wakerefs(&rpm->debug, &dbg); |
| 261 | spin_unlock_irqrestore(&rpm->debug.lock, flags); |
| 262 | |
| 263 | dump_and_free_wakeref_tracking(&dbg); |
| 264 | } |
| 265 | |
| 266 | void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, |
| 267 | struct drm_printer *p) |
| 268 | { |
| 269 | struct intel_runtime_pm_debug dbg = {}; |
| 270 | |
| 271 | do { |
| 272 | unsigned long alloc = dbg.count; |
| 273 | depot_stack_handle_t *s; |
| 274 | |
| 275 | spin_lock_irq(&rpm->debug.lock); |
| 276 | dbg.count = rpm->debug.count; |
| 277 | if (dbg.count <= alloc) { |
| 278 | memcpy(dbg.owners, |
| 279 | rpm->debug.owners, |
| 280 | dbg.count * sizeof(*s)); |
| 281 | } |
| 282 | dbg.last_acquire = rpm->debug.last_acquire; |
| 283 | dbg.last_release = rpm->debug.last_release; |
| 284 | spin_unlock_irq(&rpm->debug.lock); |
| 285 | if (dbg.count <= alloc) |
| 286 | break; |
| 287 | |
| 288 | s = krealloc(dbg.owners, |
| 289 | dbg.count * sizeof(*s), |
| 290 | GFP_NOWAIT | __GFP_NOWARN); |
| 291 | if (!s) |
| 292 | goto out; |
| 293 | |
| 294 | dbg.owners = s; |
| 295 | } while (1); |
| 296 | |
| 297 | __print_intel_runtime_pm_wakeref(p, &dbg); |
| 298 | |
| 299 | out: |
| 300 | kfree(dbg.owners); |
| 301 | } |
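| Note: | The do/while loop above snapshots the tracking state without holding the lock across an allocation: the owner count is read under the lock, the buffer is (re)allocated outside it, and the copy is retried until the buffer is large enough, since more wakerefs may be taken between iterations. |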
| 302 | |
| 303 | #else |
| 304 | |
| 305 | static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) |
| 306 | { |
| 307 | } |
| 308 | |
| 309 | static depot_stack_handle_t |
| 310 | track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) |
| 311 | { |
| 312 | return -1; |
| 313 | } |
| 314 | |
| 315 | static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, |
| 316 | intel_wakeref_t wref) |
| 317 | { |
| 318 | } |
| 319 | |
| 320 | static void |
| 321 | __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm) |
| 322 | { |
| 323 | atomic_dec(&rpm->wakeref_count); |
| 324 | } |
| 325 | |
| 326 | static void |
| 327 | untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm) |
| 328 | { |
| 329 | } |
| 330 | |
| 331 | #endif |
| 332 | |
| 333 | static void |
| 334 | intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock) |
| 335 | { |
| 336 | if (wakelock) { |
| 337 | atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count); |
| 338 | assert_rpm_wakelock_held(rpm); |
| 339 | } else { |
| 340 | atomic_inc(&rpm->wakeref_count); |
| 341 | assert_rpm_raw_wakeref_held(rpm); |
| 342 | } |
| 343 | } |
| 344 | |
| 345 | static void |
| 346 | intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock) |
| 347 | { |
| 348 | if (wakelock) { |
| 349 | assert_rpm_wakelock_held(rpm); |
| 350 | atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count); |
| 351 | } else { |
| 352 | assert_rpm_raw_wakeref_held(rpm); |
| 353 | } |
| 354 | |
| 355 | __intel_wakeref_dec_and_check_tracking(rpm); |
| 356 | } |
| 357 | |
| 358 | static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm, |
| 359 | bool wakelock) |
| 360 | { |
| 361 | struct drm_i915_private *i915 = container_of(rpm, |
| 362 | struct drm_i915_private, |
| 363 | runtime_pm); |
| 364 | int ret; |
| 365 | |
| 366 | ret = pm_runtime_get_sync(rpm->kdev); |
| 367 | drm_WARN_ONCE(&i915->drm, ret < 0, |
| 368 | "pm_runtime_get_sync() failed: %d\n", ret); |
| 369 | |
| 370 | intel_runtime_pm_acquire(rpm, wakelock); |
| 371 | |
| 372 | return track_intel_runtime_pm_wakeref(rpm); |
| 373 | } |
| 374 | |
| 375 | /** |
| 376 | * intel_runtime_pm_get_raw - grab a raw runtime pm reference |
| 377 | * @rpm: the intel_runtime_pm structure |
| 378 | * |
| 379 | * This is the unlocked version of intel_display_power_is_enabled() and should |
| 380 | * only be used from error capture and recovery code where deadlocks are |
| 381 | * possible. |
| 382 | * This function grabs a device-level runtime pm reference (mostly used for |
| 383 | * asynchronous PM management from display code) and ensures that it is powered |
| 384 | * up. Raw references are not considered during wakelock assert checks. |
| 385 | * |
| 386 | * Any runtime pm reference obtained by this function must have a symmetric |
| 387 | * call to intel_runtime_pm_put_raw() to release the reference again. |
| 388 | * |
| 389 | * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates |
| 390 | * as True if the wakeref was acquired, or False otherwise. |
| 391 | */ |
| 392 | intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm) |
| 393 | { |
| 394 | return __intel_runtime_pm_get(rpm, false); |
| 395 | } |
| 396 | |
| 397 | /** |
| 398 | * intel_runtime_pm_get - grab a runtime pm reference |
| 399 | * @rpm: the intel_runtime_pm structure |
| 400 | * |
| 401 | * This function grabs a device-level runtime pm reference (mostly used for GEM |
| 402 | * code to ensure the GTT or GT is on) and ensures that it is powered up. |
| 403 | * |
| 404 | * Any runtime pm reference obtained by this function must have a symmetric |
| 405 | * call to intel_runtime_pm_put() to release the reference again. |
| 406 | * |
| 407 | * Returns: the wakeref cookie to pass to intel_runtime_pm_put() |
| 408 | */ |
| 409 | intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm) |
| 410 | { |
| 411 | return __intel_runtime_pm_get(rpm, true); |
| 412 | } |
| 413 | |
| 414 | /** |
| 415 | * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active |
| 416 | * @rpm: the intel_runtime_pm structure |
| 417 | * @ignore_usecount: get a ref even if dev->power.usage_count is 0 |
| 418 | * |
| 419 | * This function grabs a device-level runtime pm reference if the device is |
| 420 | * already active and ensures that it is powered up. It is illegal to try |
| 421 | * and access the HW should intel_runtime_pm_get_if_active() report failure. |
| 422 | * |
| 423 | * If @ignore_usecount=true, a reference will be acquired even if there is no |
| 424 | * user requiring the device to be powered up (dev->power.usage_count == 0). |
| 425 | * If the function returns false in this case then it's guaranteed that the |
| 426 | * device's runtime suspend hook has been called already or that it will be |
| 427 | * called (and hence it's also guaranteed that the device's runtime resume |
| 428 | * hook will be called eventually). |
| 429 | * |
| 430 | * Any runtime pm reference obtained by this function must have a symmetric |
| 431 | * call to intel_runtime_pm_put() to release the reference again. |
| 432 | * |
| 433 | * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates |
| 434 | * as True if the wakeref was acquired, or False otherwise. |
| 435 | */ |
| 436 | static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm, |
| 437 | bool ignore_usecount) |
| 438 | { |
| 439 | if (IS_ENABLED(CONFIG_PM)) { |
| 440 | /* |
| 441 | * In cases where runtime PM is disabled by the RPM core and we get an |
| 442 | * -EINVAL return value, we are not supposed to call this function, |
| 443 | * since the power state is undefined. This applies at the moment to |
| 444 | * the late/early system suspend/resume handlers. |
| 445 | */ |
| 446 | if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0) |
| 447 | return 0; |
| 448 | } |
| 449 | |
| 450 | intel_runtime_pm_acquire(rpm, true); |
| 451 | |
| 452 | return track_intel_runtime_pm_wakeref(rpm); |
| 453 | } |
| 454 | |
| 455 | intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) |
| 456 | { |
| 457 | return __intel_runtime_pm_get_if_active(rpm, false); |
| 458 | } |
| 459 | |
| 460 | intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm) |
| 461 | { |
| 462 | return __intel_runtime_pm_get_if_active(rpm, true); |
| 463 | } |
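Per the kerneldoc above, a failed conditional get means the hardware must not be touched. A minimal usage sketch (the surrounding statements are illustrative, not taken from this file):

    intel_wakeref_t wakeref = intel_runtime_pm_get_if_in_use(rpm);
    if (wakeref) {
            /* hardware is guaranteed to be powered here */
            intel_runtime_pm_put(rpm, wakeref);
    }
    /* else: leave the device alone; it may be runtime suspended */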
| 464 | |
| 465 | /** |
| 466 | * intel_runtime_pm_get_noresume - grab a runtime pm reference |
| 467 | * @rpm: the intel_runtime_pm structure |
| 468 | * |
| 469 | * This function grabs a device-level runtime pm reference (mostly used for GEM |
| 470 | * code to ensure the GTT or GT is on). |
| 471 | * |
| 472 | * It will _not_ power up the device but instead only check that it's powered |
| 473 | * on. Therefore it is only valid to call this function from contexts where |
| 474 | * the device is known to be powered up and where trying to power it up would |
| 475 | * result in hilarity and deadlocks. That pretty much means only the system |
| 476 | * suspend/resume code where this is used to grab runtime pm references for |
| 477 | * delayed setup down in work items. |
| 478 | * |
| 479 | * Any runtime pm reference obtained by this function must have a symmetric |
| 480 | * call to intel_runtime_pm_put() to release the reference again. |
| 481 | * |
| 482 | * Returns: the wakeref cookie to pass to intel_runtime_pm_put() |
| 483 | */ |
| 484 | intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm) |
| 485 | { |
| 486 | assert_rpm_wakelock_held(rpm); |
| 487 | pm_runtime_get_noresume(rpm->kdev); |
| 488 | |
| 489 | intel_runtime_pm_acquire(rpm, true); |
| 490 | |
| 491 | return track_intel_runtime_pm_wakeref(rpm); |
| 492 | } |
| 493 | |
| 494 | static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm, |
| 495 | intel_wakeref_t wref, |
| 496 | bool wakelock) |
| 497 | { |
| 498 | struct device *kdev = rpm->kdev; |
Value stored to 'kdev' during its initialization is never read
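| Note: | The local 'kdev' is read only by pm_runtime_mark_last_busy() and pm_runtime_put_autosuspend() below, so for the store to be dead those helpers cannot be evaluating their argument in this build; presumably the OpenBSD compat layer reduces them to no-ops. On Linux they are real calls that consume kdev, which makes this finding port-specific rather than a driver bug. A reduced sketch of that assumption: |

    /* Assumed (not verified) no-op compat definitions: */
    #define pm_runtime_mark_last_busy(dev)   /* expands to nothing */
    #define pm_runtime_put_autosuspend(dev)  /* expands to nothing */

    struct device *kdev = rpm->kdev;  /* value stored here ... */
    pm_runtime_mark_last_busy(kdev);  /* ... discarded by the macro ... */
    pm_runtime_put_autosuspend(kdev); /* ... and thus never read */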
| 499 | |
| 500 | untrack_intel_runtime_pm_wakeref(rpm, wref); |
| 501 | |
| 502 | intel_runtime_pm_release(rpm, wakelock); |
| 503 | |
| 504 | pm_runtime_mark_last_busy(kdev); |
| 505 | pm_runtime_put_autosuspend(kdev); |
| 506 | } |
| 507 | |
| 508 | /** |
| 509 | * intel_runtime_pm_put_raw - release a raw runtime pm reference |
| 510 | * @rpm: the intel_runtime_pm structure |
| 511 | * @wref: wakeref acquired for the reference that is being released |
| 512 | * |
| 513 | * This function drops the device-level runtime pm reference obtained by |
| 514 | * intel_runtime_pm_get_raw() and might power down the corresponding |
| 515 | * hardware block right away if this is the last reference. |
| 516 | */ |
| 517 | void |
| 518 | intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref) |
| 519 | { |
| 520 | __intel_runtime_pm_put(rpm, wref, false); |
| 521 | } |
| 522 | |
| 523 | /** |
| 524 | * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference |
| 525 | * @rpm: the intel_runtime_pm structure |
| 526 | * |
| 527 | * This function drops the device-level runtime pm reference obtained by |
| 528 | * intel_runtime_pm_get() and might power down the corresponding |
| 529 | * hardware block right away if this is the last reference. |
| 530 | * |
| 531 | * This function exists only for historical reasons and should be avoided in |
| 532 | * new code, as the correctness of its use cannot be checked. Always use |
| 533 | * intel_runtime_pm_put() instead. |
| 534 | */ |
| 535 | void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm) |
| 536 | { |
| 537 | __intel_runtime_pm_put(rpm, -1, true); |
| 538 | } |
| 539 | |
| 540 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) |
| 541 | /** |
| 542 | * intel_runtime_pm_put - release a runtime pm reference |
| 543 | * @rpm: the intel_runtime_pm structure |
| 544 | * @wref: wakeref acquired for the reference that is being released |
| 545 | * |
| 546 | * This function drops the device-level runtime pm reference obtained by |
| 547 | * intel_runtime_pm_get() and might power down the corresponding |
| 548 | * hardware block right away if this is the last reference. |
| 549 | */ |
| 550 | void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref) |
| 551 | { |
| 552 | __intel_runtime_pm_put(rpm, wref, true); |
| 553 | } |
| 554 | #endif |
| 555 | |
| 556 | /** |
| 557 | * intel_runtime_pm_enable - enable runtime pm |
| 558 | * @rpm: the intel_runtime_pm structure |
| 559 | * |
| 560 | * This function enables runtime pm at the end of the driver load sequence. |
| 561 | * |
| 562 | * Note that this function does currently not enable runtime pm for the |
| 563 | * subordinate display power domains. That is done by |
| 564 | * intel_power_domains_enable(). |
| 565 | */ |
| 566 | void intel_runtime_pm_enable(struct intel_runtime_pm *rpm) |
| 567 | { |
| 568 | struct drm_i915_private *i915 = container_of(rpm, |
| 569 | struct drm_i915_private, |
| 570 | runtime_pm); |
| 571 | struct device *kdev = rpm->kdev; |
| 572 | |
| 573 | /* |
| 574 | * Disable the system suspend direct complete optimization, which can |
| 575 | * leave the device suspended skipping the driver's suspend handlers |
| 576 | * if the device was already runtime suspended. This is needed due to |
| 577 | * the difference in our runtime and system suspend sequence and |
| 579 | * because the HDA driver may require us to enable the audio power |
| 579 | * domain during system suspend. |
| 580 | */ |
| 581 | dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE); |
| 582 | |
| 583 | pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */ |
| 584 | pm_runtime_mark_last_busy(kdev); |
| 585 | |
| 586 | /* |
| 587 | * Take a permanent reference to disable the RPM functionality and drop |
| 588 | * it only when unloading the driver. Use the low level get/put helpers, |
| 589 | * so the driver's own RPM reference tracking asserts also work on |
| 590 | * platforms without RPM support. |
| 591 | */ |
| 592 | if (!rpm->available) { |
| 593 | int ret; |
| 594 | |
| 595 | pm_runtime_dont_use_autosuspend(kdev); |
| 596 | ret = pm_runtime_get_sync(kdev); |
| 597 | drm_WARN(&i915->drm, ret < 0, |
| 598 | "pm_runtime_get_sync() failed: %d\n", ret); |
| 599 | } else { |
| 600 | pm_runtime_use_autosuspend(kdev); |
| 601 | } |
| 602 | |
| 603 | /* |
| 604 | * The core calls the driver load handler with an RPM reference held. |
| 605 | * We drop that here and will reacquire it during unloading in |
| 606 | * intel_power_domains_fini(). |
| 607 | */ |
| 608 | pm_runtime_put_autosuspend(kdev); |
| 609 | } |
| 610 | |
| 611 | void intel_runtime_pm_disable(struct intel_runtime_pm *rpm) |
| 612 | { |
| 613 | struct drm_i915_private *i915 = container_of(rpm, |
| 614 | struct drm_i915_private, |
| 615 | runtime_pm); |
| 616 | struct device *kdev = rpm->kdev; |
| 617 | |
| 618 | /* Transfer rpm ownership back to core */ |
| 619 | drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0, |
| 620 | "Failed to pass rpm ownership back to core\n"); |
| 621 | |
| 622 | pm_runtime_dont_use_autosuspend(kdev); |
| 623 | |
| 624 | if (!rpm->available) |
| 625 | pm_runtime_put(kdev); |
| 626 | } |
| 627 | |
| 628 | void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm) |
| 629 | { |
| 630 | struct drm_i915_private *i915 = container_of(rpm, |
| 631 | struct drm_i915_private, |
| 632 | runtime_pm); |
| 633 | int count = atomic_read(&rpm->wakeref_count); |
| 634 | |
| 635 | drm_WARN(&i915->drm, count, |
| 636 | "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n", |
| 637 | intel_rpm_raw_wakeref_count(count), |
| 638 | intel_rpm_wakelock_count(count)); |
| 639 | |
| 640 | untrack_all_intel_runtime_pm_wakerefs(rpm); |
| 641 | } |
| 642 | |
| 643 | void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm) |
| 644 | { |
| 645 | struct drm_i915_private *i915 = |
| 646 | container_of(rpm, struct drm_i915_private, runtime_pm); |
| 647 | #ifdef notyet |
| 648 | struct pci_dev *pdev = i915->drm.pdev; |
| 649 | struct device *kdev = &pdev->dev; |
| 650 | |
| 651 | rpm->kdev = kdev; |
| 652 | #endif |
| 653 | rpm->available = HAS_RUNTIME_PM(i915); |
| 654 | |
| 655 | init_intel_runtime_pm_wakeref(rpm); |
| 656 | } |