Bug Summary

File: dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
Warning: line 677, column 20
Value stored to 'skutable' during its initialization is never read

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name smu_v13_0_0_ppt.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc 
-I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c 
/usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
1/*
2 * Copyright 2021 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#define SWSMU_CODE_LAYER_L2
25
26#include <linux/firmware.h>
27#include <linux/pci.h>
28#include <linux/i2c.h>
29#include "amdgpu.h"
30#include "amdgpu_smu.h"
31#include "atomfirmware.h"
32#include "amdgpu_atomfirmware.h"
33#include "amdgpu_atombios.h"
34#include "smu_v13_0.h"
35#include "smu13_driver_if_v13_0_0.h"
36#include "soc15_common.h"
37#include "atom.h"
38#include "smu_v13_0_0_ppt.h"
39#include "smu_v13_0_0_pptable.h"
40#include "smu_v13_0_0_ppsmc.h"
41#include "nbio/nbio_4_3_0_offset.h"
42#include "nbio/nbio_4_3_0_sh_mask.h"
43#include "mp/mp_13_0_0_offset.h"
44#include "mp/mp_13_0_0_sh_mask.h"
45
46#include "asic_reg/mp/mp_13_0_0_sh_mask.h"
47#include "smu_cmn.h"
48#include "amdgpu_ras.h"
49
50/*
51 * DO NOT use these for err/warn/info/debug messages.
52 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
53 * They are more MGPU friendly.
54 */
55#undef pr_err
56#undef pr_warn
57#undef pr_info
58#undef pr_debug
59
60#define to_amdgpu_device(x)(({ const __typeof( ((struct amdgpu_device *)0)->pm.smu_i2c
) *__mptr = (x); (struct amdgpu_device *)( (char *)__mptr - __builtin_offsetof
(struct amdgpu_device, pm.smu_i2c) );}))
(container_of(x, struct amdgpu_device, pm.smu_i2c)({ const __typeof( ((struct amdgpu_device *)0)->pm.smu_i2c
) *__mptr = (x); (struct amdgpu_device *)( (char *)__mptr - __builtin_offsetof
(struct amdgpu_device, pm.smu_i2c) );})
)
61
62#define FEATURE_MASK(feature)(1ULL << feature) (1ULL << feature)
63#define SMC_DPM_FEATURE( (1ULL << 1) | (1ULL << 3) | (1ULL << 7) |
(1ULL << 5) | (1ULL << 4) | (1ULL << 6))
( \
64 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)(1ULL << 1) | \
65 FEATURE_MASK(FEATURE_DPM_UCLK_BIT)(1ULL << 3) | \
66 FEATURE_MASK(FEATURE_DPM_LINK_BIT)(1ULL << 7) | \
67 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)(1ULL << 5) | \
68 FEATURE_MASK(FEATURE_DPM_FCLK_BIT)(1ULL << 4) | \
69 FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)(1ULL << 6))
70
71#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE0x4000 0x4000
72
73#define mmMP1_SMN_C2PMSG_660x0282 0x0282
74#define mmMP1_SMN_C2PMSG_66_BASE_IDX0 0
75
76#define mmMP1_SMN_C2PMSG_820x0292 0x0292
77#define mmMP1_SMN_C2PMSG_82_BASE_IDX0 0
78
79#define mmMP1_SMN_C2PMSG_900x029a 0x029a
80#define mmMP1_SMN_C2PMSG_90_BASE_IDX0 0
81
82#define mmMP1_SMN_C2PMSG_750x028b 0x028b
83#define mmMP1_SMN_C2PMSG_75_BASE_IDX0 0
84
85#define mmMP1_SMN_C2PMSG_530x0275 0x0275
86#define mmMP1_SMN_C2PMSG_53_BASE_IDX0 0
87
88#define mmMP1_SMN_C2PMSG_540x0276 0x0276
89#define mmMP1_SMN_C2PMSG_54_BASE_IDX0 0
90
91#define DEBUGSMC_MSG_Mode1Reset2 2
92
93static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
94 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1)[SMU_MSG_TestMessage] = {1, (0x1), (1)},
95 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1)[SMU_MSG_GetSmuVersion] = {1, (0x2), (1)},
96 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1)[SMU_MSG_GetDriverIfVersion] = {1, (0x3), (1)},
97 MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0)[SMU_MSG_SetAllowedFeaturesMaskLow] = {1, (0x4), (0)},
98 MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0)[SMU_MSG_SetAllowedFeaturesMaskHigh] = {1, (0x5), (0)},
99 MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0)[SMU_MSG_EnableAllSmuFeatures] = {1, (0x6), (0)},
100 MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0)[SMU_MSG_DisableAllSmuFeatures] = {1, (0x7), (0)},
101 MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1)[SMU_MSG_EnableSmuFeaturesLow] = {1, (0x8), (1)},
102 MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1)[SMU_MSG_EnableSmuFeaturesHigh] = {1, (0x9), (1)},
103 MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1)[SMU_MSG_DisableSmuFeaturesLow] = {1, (0xA), (1)},
104 MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1)[SMU_MSG_DisableSmuFeaturesHigh] = {1, (0xB), (1)},
105 MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1)[SMU_MSG_GetEnabledSmuFeaturesLow] = {1, (0xC), (1)},
106 MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1)[SMU_MSG_GetEnabledSmuFeaturesHigh] = {1, (0xD), (1)},
107 MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1)[SMU_MSG_SetWorkloadMask] = {1, (0x24), (1)},
108 MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0)[SMU_MSG_SetPptLimit] = {1, (0x32), (0)},
109 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1)[SMU_MSG_SetDriverDramAddrHigh] = {1, (0xE), (1)},
110 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1)[SMU_MSG_SetDriverDramAddrLow] = {1, (0xF), (1)},
111 MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0)[SMU_MSG_SetToolsDramAddrHigh] = {1, (0x10), (0)},
112 MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0)[SMU_MSG_SetToolsDramAddrLow] = {1, (0x11), (0)},
113 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1)[SMU_MSG_TransferTableSmu2Dram] = {1, (0x12), (1)},
114 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0)[SMU_MSG_TransferTableDram2Smu] = {1, (0x13), (0)},
115 MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0)[SMU_MSG_UseDefaultPPTable] = {1, (0x14), (0)},
116 MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0)[SMU_MSG_RunDcBtc] = {1, (0x36), (0)},
117 MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0)[SMU_MSG_EnterBaco] = {1, (0x15), (0)},
118 MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0)[SMU_MSG_ExitBaco] = {1, (0x16), (0)},
119 MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1)[SMU_MSG_SetSoftMinByFreq] = {1, (0x19), (1)},
120 MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1)[SMU_MSG_SetSoftMaxByFreq] = {1, (0x1A), (1)},
121 MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1)[SMU_MSG_SetHardMinByFreq] = {1, (0x1B), (1)},
122 MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0)[SMU_MSG_SetHardMaxByFreq] = {1, (0x1C), (0)},
123 MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1)[SMU_MSG_GetMinDpmFreq] = {1, (0x1D), (1)},
124 MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1)[SMU_MSG_GetMaxDpmFreq] = {1, (0x1E), (1)},
125 MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1)[SMU_MSG_GetDpmFreqByIndex] = {1, (0x1F), (1)},
126 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0)[SMU_MSG_PowerUpVcn] = {1, (0x2A), (0)},
127 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0)[SMU_MSG_PowerDownVcn] = {1, (0x2B), (0)},
128 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0)[SMU_MSG_PowerUpJpeg] = {1, (0x2C), (0)},
129 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0)[SMU_MSG_PowerDownJpeg] = {1, (0x2D), (0)},
130 MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1)[SMU_MSG_GetDcModeMaxDpmFreq] = {1, (0x27), (1)},
131 MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0)[SMU_MSG_OverridePcieParameters] = {1, (0x20), (0)},
132 MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0)[SMU_MSG_DramLogSetDramAddrHigh] = {1, (0x21), (0)},
133 MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0)[SMU_MSG_DramLogSetDramAddrLow] = {1, (0x22), (0)},
134 MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0)[SMU_MSG_DramLogSetDramSize] = {1, (0x23), (0)},
135 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0)[SMU_MSG_AllowGfxOff] = {1, (0x28), (0)},
136 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0)[SMU_MSG_DisallowGfxOff] = {1, (0x29), (0)},
137 MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0)[SMU_MSG_SetMGpuFanBoostLimitRpm] = {1, (0x3C), (0)},
138 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0)[SMU_MSG_GetPptLimit] = {1, (0x33), (0)},
139 MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0)[SMU_MSG_NotifyPowerSource] = {1, (0x35), (0)},
140 MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0)[SMU_MSG_Mode1Reset] = {1, (0x2F), (0)},
141 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0)[SMU_MSG_PrepareMp1ForUnload] = {1, (0x2E), (0)},
142 MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0)[SMU_MSG_DFCstateControl] = {1, (0x3B), (0)},
143 MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0)[SMU_MSG_ArmD3] = {1, (0x17), (0)},
144 MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0)[SMU_MSG_SetNumBadMemoryPagesRetired] = {1, (0x49), (0)},
145 MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,[SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel] = {1, (0x4A
), (0)}
146 PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0)[SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel] = {1, (0x4A
), (0)}
,
147 MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0)[SMU_MSG_AllowGpo] = {1, (0x41), (0)},
148 MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0)[SMU_MSG_AllowIHHostInterrupt] = {1, (0x4C), (0)},
149};
150
151static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
152 CLK_MAP(GFXCLK, PPCLK_GFXCLK)[SMU_GFXCLK] = {1, (PPCLK_GFXCLK)},
153 CLK_MAP(SCLK, PPCLK_GFXCLK)[SMU_SCLK] = {1, (PPCLK_GFXCLK)},
154 CLK_MAP(SOCCLK, PPCLK_SOCCLK)[SMU_SOCCLK] = {1, (PPCLK_SOCCLK)},
155 CLK_MAP(FCLK, PPCLK_FCLK)[SMU_FCLK] = {1, (PPCLK_FCLK)},
156 CLK_MAP(UCLK, PPCLK_UCLK)[SMU_UCLK] = {1, (PPCLK_UCLK)},
157 CLK_MAP(MCLK, PPCLK_UCLK)[SMU_MCLK] = {1, (PPCLK_UCLK)},
158 CLK_MAP(VCLK, PPCLK_VCLK_0)[SMU_VCLK] = {1, (PPCLK_VCLK_0)},
159 CLK_MAP(VCLK1, PPCLK_VCLK_1)[SMU_VCLK1] = {1, (PPCLK_VCLK_1)},
160 CLK_MAP(DCLK, PPCLK_DCLK_0)[SMU_DCLK] = {1, (PPCLK_DCLK_0)},
161 CLK_MAP(DCLK1, PPCLK_DCLK_1)[SMU_DCLK1] = {1, (PPCLK_DCLK_1)},
162};
163
164static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
165 FEA_MAP(FW_DATA_READ)[SMU_FEATURE_FW_DATA_READ_BIT] = {1, 0},
166 FEA_MAP(DPM_GFXCLK)[SMU_FEATURE_DPM_GFXCLK_BIT] = {1, 1},
167 FEA_MAP(DPM_GFX_POWER_OPTIMIZER)[SMU_FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT] = {1, 2},
168 FEA_MAP(DPM_UCLK)[SMU_FEATURE_DPM_UCLK_BIT] = {1, 3},
169 FEA_MAP(DPM_FCLK)[SMU_FEATURE_DPM_FCLK_BIT] = {1, 4},
170 FEA_MAP(DPM_SOCCLK)[SMU_FEATURE_DPM_SOCCLK_BIT] = {1, 5},
171 FEA_MAP(DPM_MP0CLK)[SMU_FEATURE_DPM_MP0CLK_BIT] = {1, 6},
172 FEA_MAP(DPM_LINK)[SMU_FEATURE_DPM_LINK_BIT] = {1, 7},
173 FEA_MAP(DPM_DCN)[SMU_FEATURE_DPM_DCN_BIT] = {1, 8},
174 FEA_MAP(VMEMP_SCALING)[SMU_FEATURE_VMEMP_SCALING_BIT] = {1, 9},
175 FEA_MAP(VDDIO_MEM_SCALING)[SMU_FEATURE_VDDIO_MEM_SCALING_BIT] = {1, 10},
176 FEA_MAP(DS_GFXCLK)[SMU_FEATURE_DS_GFXCLK_BIT] = {1, 11},
177 FEA_MAP(DS_SOCCLK)[SMU_FEATURE_DS_SOCCLK_BIT] = {1, 12},
178 FEA_MAP(DS_FCLK)[SMU_FEATURE_DS_FCLK_BIT] = {1, 13},
179 FEA_MAP(DS_LCLK)[SMU_FEATURE_DS_LCLK_BIT] = {1, 14},
180 FEA_MAP(DS_DCFCLK)[SMU_FEATURE_DS_DCFCLK_BIT] = {1, 15},
181 FEA_MAP(DS_UCLK)[SMU_FEATURE_DS_UCLK_BIT] = {1, 16},
182 FEA_MAP(GFX_ULV)[SMU_FEATURE_GFX_ULV_BIT] = {1, 17},
183 FEA_MAP(FW_DSTATE)[SMU_FEATURE_FW_DSTATE_BIT] = {1, 18},
184 FEA_MAP(GFXOFF)[SMU_FEATURE_GFXOFF_BIT] = {1, 19},
185 FEA_MAP(BACO)[SMU_FEATURE_BACO_BIT] = {1, 20},
186 FEA_MAP(MM_DPM)[SMU_FEATURE_MM_DPM_BIT] = {1, 21},
187 FEA_MAP(SOC_MPCLK_DS)[SMU_FEATURE_SOC_MPCLK_DS_BIT] = {1, 22},
188 FEA_MAP(BACO_MPCLK_DS)[SMU_FEATURE_BACO_MPCLK_DS_BIT] = {1, 23},
189 FEA_MAP(THROTTLERS)[SMU_FEATURE_THROTTLERS_BIT] = {1, 24},
190 FEA_MAP(SMARTSHIFT)[SMU_FEATURE_SMARTSHIFT_BIT] = {1, 25},
191 FEA_MAP(GTHR)[SMU_FEATURE_GTHR_BIT] = {1, 26},
192 FEA_MAP(ACDC)[SMU_FEATURE_ACDC_BIT] = {1, 27},
193 FEA_MAP(VR0HOT)[SMU_FEATURE_VR0HOT_BIT] = {1, 28},
194 FEA_MAP(FW_CTF)[SMU_FEATURE_FW_CTF_BIT] = {1, 29},
195 FEA_MAP(FAN_CONTROL)[SMU_FEATURE_FAN_CONTROL_BIT] = {1, 30},
196 FEA_MAP(GFX_DCS)[SMU_FEATURE_GFX_DCS_BIT] = {1, 31},
197 FEA_MAP(GFX_READ_MARGIN)[SMU_FEATURE_GFX_READ_MARGIN_BIT] = {1, 32},
198 FEA_MAP(LED_DISPLAY)[SMU_FEATURE_LED_DISPLAY_BIT] = {1, 33},
199 FEA_MAP(GFXCLK_SPREAD_SPECTRUM)[SMU_FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT] = {1, 34},
200 FEA_MAP(OUT_OF_BAND_MONITOR)[SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT] = {1, 35},
201 FEA_MAP(OPTIMIZED_VMIN)[SMU_FEATURE_OPTIMIZED_VMIN_BIT] = {1, 36},
202 FEA_MAP(GFX_IMU)[SMU_FEATURE_GFX_IMU_BIT] = {1, 37},
203 FEA_MAP(BOOT_TIME_CAL)[SMU_FEATURE_BOOT_TIME_CAL_BIT] = {1, 38},
204 FEA_MAP(GFX_PCC_DFLL)[SMU_FEATURE_GFX_PCC_DFLL_BIT] = {1, 39},
205 FEA_MAP(SOC_CG)[SMU_FEATURE_SOC_CG_BIT] = {1, 40},
206 FEA_MAP(DF_CSTATE)[SMU_FEATURE_DF_CSTATE_BIT] = {1, 41},
207 FEA_MAP(GFX_EDC)[SMU_FEATURE_GFX_EDC_BIT] = {1, 42},
208 FEA_MAP(BOOT_POWER_OPT)[SMU_FEATURE_BOOT_POWER_OPT_BIT] = {1, 43},
209 FEA_MAP(CLOCK_POWER_DOWN_BYPASS)[SMU_FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT] = {1, 44},
210 FEA_MAP(DS_VCN)[SMU_FEATURE_DS_VCN_BIT] = {1, 45},
211 FEA_MAP(BACO_CG)[SMU_FEATURE_BACO_CG_BIT] = {1, 46},
212 FEA_MAP(MEM_TEMP_READ)[SMU_FEATURE_MEM_TEMP_READ_BIT] = {1, 47},
213 FEA_MAP(ATHUB_MMHUB_PG)[SMU_FEATURE_ATHUB_MMHUB_PG_BIT] = {1, 48},
214 FEA_MAP(SOC_PCC)[SMU_FEATURE_SOC_PCC_BIT] = {1, 49},
215 [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT21},
216 [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT21},
217 [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT24},
218};
219
220static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
221 TAB_MAP(PPTABLE)[SMU_TABLE_PPTABLE] = {1, 0},
222 TAB_MAP(WATERMARKS)[SMU_TABLE_WATERMARKS] = {1, 2},
223 TAB_MAP(AVFS_PSM_DEBUG)[SMU_TABLE_AVFS_PSM_DEBUG] = {1, 3},
224 TAB_MAP(PMSTATUSLOG)[SMU_TABLE_PMSTATUSLOG] = {1, 4},
225 TAB_MAP(SMU_METRICS)[SMU_TABLE_SMU_METRICS] = {1, 5},
226 TAB_MAP(DRIVER_SMU_CONFIG)[SMU_TABLE_DRIVER_SMU_CONFIG] = {1, 6},
227 TAB_MAP(ACTIVITY_MONITOR_COEFF)[SMU_TABLE_ACTIVITY_MONITOR_COEFF] = {1, 7},
228 [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE1},
229 TAB_MAP(I2C_COMMANDS)[SMU_TABLE_I2C_COMMANDS] = {1, 9},
230};
231
232static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
233 PWR_MAP(AC)[SMU_POWER_SOURCE_AC] = {1, POWER_SOURCE_AC},
234 PWR_MAP(DC)[SMU_POWER_SOURCE_DC] = {1, POWER_SOURCE_DC},
235};
236
237static struct cmn2asic_mapping smu_v13_0_0_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
238 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT)[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = {1, (0)},
239 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT)[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = {1, (1)},
240 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT)[PP_SMC_POWER_PROFILE_POWERSAVING] = {1, (2)},
241 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT)[PP_SMC_POWER_PROFILE_VIDEO] = {1, (3)},
242 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT)[PP_SMC_POWER_PROFILE_VR] = {1, (4)},
243 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT)[PP_SMC_POWER_PROFILE_COMPUTE] = {1, (5)},
244 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT)[PP_SMC_POWER_PROFILE_CUSTOM] = {1, (6)},
245};
246
247static const uint8_t smu_v13_0_0_throttler_map[] = {
248 [THROTTLER_PPT0_BIT16] = (SMU_THROTTLER_PPT0_BIT0),
249 [THROTTLER_PPT1_BIT17] = (SMU_THROTTLER_PPT1_BIT1),
250 [THROTTLER_PPT2_BIT18] = (SMU_THROTTLER_PPT2_BIT2),
251 [THROTTLER_PPT3_BIT19] = (SMU_THROTTLER_PPT3_BIT3),
252 [THROTTLER_TDC_GFX_BIT13] = (SMU_THROTTLER_TDC_GFX_BIT16),
253 [THROTTLER_TDC_SOC_BIT14] = (SMU_THROTTLER_TDC_SOC_BIT17),
254 [THROTTLER_TEMP_EDGE_BIT0] = (SMU_THROTTLER_TEMP_EDGE_BIT35),
255 [THROTTLER_TEMP_HOTSPOT_BIT1] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT36),
256 [THROTTLER_TEMP_MEM_BIT4] = (SMU_THROTTLER_TEMP_MEM_BIT34),
257 [THROTTLER_TEMP_VR_GFX_BIT5] = (SMU_THROTTLER_TEMP_VR_GFX_BIT38),
258 [THROTTLER_TEMP_VR_SOC_BIT8] = (SMU_THROTTLER_TEMP_VR_SOC_BIT39),
259 [THROTTLER_TEMP_VR_MEM0_BIT6] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT40),
260 [THROTTLER_TEMP_VR_MEM1_BIT7] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT41),
261 [THROTTLER_TEMP_LIQUID0_BIT10] = (SMU_THROTTLER_TEMP_LIQUID0_BIT42),
262 [THROTTLER_TEMP_LIQUID1_BIT11] = (SMU_THROTTLER_TEMP_LIQUID1_BIT43),
263 [THROTTLER_GFX_APCC_PLUS_BIT21] = (SMU_THROTTLER_APCC_BIT23),
264 [THROTTLER_FIT_BIT20] = (SMU_THROTTLER_FIT_BIT57),
265};
266
267static int
268smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
269 uint32_t *feature_mask, uint32_t num)
270{
271 struct amdgpu_device *adev = smu->adev;
272 u32 smu_version;
273
274 if (num > 2)
275 return -EINVAL22;
276
277 memset(feature_mask, 0xff, sizeof(uint32_t) * num)__builtin_memset((feature_mask), (0xff), (sizeof(uint32_t) * num
))
;
278
279 if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
280 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)(1ULL << 1);
281 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT)(1ULL << 37);
282 }
283
284 if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB(1 << 16)) ||
285 !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB(1 << 13)))
286 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT)(1ULL << 48);
287
288 if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
289 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)(1ULL << 5);
290
291 /* PMFW 78.58 contains a critical fix for gfxoff feature */
292 smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
293 if ((smu_version < 0x004e3a00) ||
294 !(adev->pm.pp_feature & PP_GFXOFF_MASK))
295 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT)(1ULL << 19);
296
297 if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
298 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT)(1ULL << 3);
299 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT)(1ULL << 9);
300 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT)(1ULL << 10);
301 }
302
303 if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
304 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT)(1ULL << 11);
305
306 if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
307 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT)(1ULL << 7);
308 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT)(1ULL << 14);
309 }
310
311 if (!(adev->pm.pp_feature & PP_ULV_MASK))
312 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT)(1ULL << 17);
313
314 return 0;
315}
316
317static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
318{
319 struct smu_table_context *table_context = &smu->smu_table;
320 struct smu_13_0_0_powerplay_table *powerplay_table =
321 table_context->power_play_table;
322 struct smu_baco_context *smu_baco = &smu->smu_baco;
323
324 if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC0x4)
325 smu->dc_controlled_by_gpio = true1;
326
327 if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO0x8) {
328 smu_baco->platform_support = true1;
329
330 if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO0x10)
331 smu_baco->maco_support = true1;
332 }
333
334 table_context->thermal_controller_type =
335 powerplay_table->thermal_controller_type;
336
337 /*
338 * Instead of having its own buffer space and get overdrive_table copied,
339 * smu->od_settings just points to the actual overdrive_table
340 */
341 smu->od_settings = &powerplay_table->overdrive_table;
342
343 return 0;
344}
345
346static int smu_v13_0_0_store_powerplay_table(struct smu_context *smu)
347{
348 struct smu_table_context *table_context = &smu->smu_table;
349 struct smu_13_0_0_powerplay_table *powerplay_table =
350 table_context->power_play_table;
351
352 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,__builtin_memcpy((table_context->driver_pptable), (&powerplay_table
->smc_pptable), (sizeof(PPTable_t)))
353 sizeof(PPTable_t))__builtin_memcpy((table_context->driver_pptable), (&powerplay_table
->smc_pptable), (sizeof(PPTable_t)))
;
354
355 return 0;
356}
357
358#ifndef atom_smc_dpm_info_table_13_0_0
359struct atom_smc_dpm_info_table_13_0_0 {
360 struct atom_common_table_header table_header;
361 BoardTable_t BoardTable;
362};
363#endif
364
365static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
366{
367 struct smu_table_context *table_context = &smu->smu_table;
368 PPTable_t *smc_pptable = table_context->driver_pptable;
369 struct atom_smc_dpm_info_table_13_0_0 *smc_dpm_table;
370 BoardTable_t *BoardTable = &smc_pptable->BoardTable;
371 int index, ret;
372
373 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,(__builtin_offsetof(struct atom_master_list_of_data_tables_v2_1
, smc_dpm_info) / sizeof(uint16_t))
374 smc_dpm_info)(__builtin_offsetof(struct atom_master_list_of_data_tables_v2_1
, smc_dpm_info) / sizeof(uint16_t))
;
375
376 ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL((void *)0), NULL((void *)0), NULL((void *)0),
377 (uint8_t **)&smc_dpm_table);
378 if (ret)
379 return ret;
380
381 memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t))__builtin_memcpy((BoardTable), (&smc_dpm_table->BoardTable
), (sizeof(BoardTable_t)))
;
382
383 return 0;
384}
385
386static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
387 void **table,
388 uint32_t *size)
389{
390 struct smu_table_context *smu_table = &smu->smu_table;
391 void *combo_pptable = smu_table->combo_pptable;
392 int ret = 0;
393
394 ret = smu_cmn_get_combo_pptable(smu);
395 if (ret)
396 return ret;
397
398 *table = combo_pptable;
399 *size = sizeof(struct smu_13_0_0_powerplay_table);
400
401 return 0;
402}
403
404static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
405{
406 struct smu_table_context *smu_table = &smu->smu_table;
407 struct amdgpu_device *adev = smu->adev;
408 int ret = 0;
409
410 if (amdgpu_sriov_vf(smu->adev)((smu->adev)->virt.caps & (1 << 2)))
411 return 0;
412
413 ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
414 &smu_table->power_play_table,
415 &smu_table->power_play_table_size);
416 if (ret)
417 return ret;
418
419 ret = smu_v13_0_0_store_powerplay_table(smu);
420 if (ret)
421 return ret;
422
423 /*
424 * With SCPM enabled, the operation below will be handled
425 * by PSP. Driver involvment is unnecessary and useless.
426 */
427 if (!adev->scpm_enabled) {
428 ret = smu_v13_0_0_append_powerplay_table(smu);
429 if (ret)
430 return ret;
431 }
432
433 ret = smu_v13_0_0_check_powerplay_table(smu);
434 if (ret)
435 return ret;
436
437 return ret;
438}
439
440static int smu_v13_0_0_tables_init(struct smu_context *smu)
441{
442 struct smu_table_context *smu_table = &smu->smu_table;
443 struct smu_table *tables = smu_table->tables;
444
445 SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),do { tables[SMU_TABLE_PPTABLE].size = sizeof(PPTable_t); tables
[SMU_TABLE_PPTABLE].align = (1 << 12); tables[SMU_TABLE_PPTABLE
].domain = 0x4; } while (0)
446 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_PPTABLE].size = sizeof(PPTable_t); tables
[SMU_TABLE_PPTABLE].align = (1 << 12); tables[SMU_TABLE_PPTABLE
].domain = 0x4; } while (0)
;
447 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),do { tables[SMU_TABLE_WATERMARKS].size = sizeof(Watermarks_t)
; tables[SMU_TABLE_WATERMARKS].align = (1 << 12); tables
[SMU_TABLE_WATERMARKS].domain = 0x4; } while (0)
448 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_WATERMARKS].size = sizeof(Watermarks_t)
; tables[SMU_TABLE_WATERMARKS].align = (1 << 12); tables
[SMU_TABLE_WATERMARKS].domain = 0x4; } while (0)
;
449 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),do { tables[SMU_TABLE_SMU_METRICS].size = sizeof(SmuMetricsExternal_t
); tables[SMU_TABLE_SMU_METRICS].align = (1 << 12); tables
[SMU_TABLE_SMU_METRICS].domain = 0x4; } while (0)
450 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_SMU_METRICS].size = sizeof(SmuMetricsExternal_t
); tables[SMU_TABLE_SMU_METRICS].align = (1 << 12); tables
[SMU_TABLE_SMU_METRICS].domain = 0x4; } while (0)
;
451 SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),do { tables[SMU_TABLE_I2C_COMMANDS].size = sizeof(SwI2cRequest_t
); tables[SMU_TABLE_I2C_COMMANDS].align = (1 << 12); tables
[SMU_TABLE_I2C_COMMANDS].domain = 0x4; } while (0)
452 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_I2C_COMMANDS].size = sizeof(SwI2cRequest_t
); tables[SMU_TABLE_I2C_COMMANDS].align = (1 << 12); tables
[SMU_TABLE_I2C_COMMANDS].domain = 0x4; } while (0)
;
453 SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),do { tables[SMU_TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t
); tables[SMU_TABLE_OVERDRIVE].align = (1 << 12); tables
[SMU_TABLE_OVERDRIVE].domain = 0x4; } while (0)
454 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t
); tables[SMU_TABLE_OVERDRIVE].align = (1 << 12); tables
[SMU_TABLE_OVERDRIVE].domain = 0x4; } while (0)
;
455 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,do { tables[SMU_TABLE_PMSTATUSLOG].size = 0x19000; tables[SMU_TABLE_PMSTATUSLOG
].align = (1 << 12); tables[SMU_TABLE_PMSTATUSLOG].domain
= 0x4; } while (0)
456 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_PMSTATUSLOG].size = 0x19000; tables[SMU_TABLE_PMSTATUSLOG
].align = (1 << 12); tables[SMU_TABLE_PMSTATUSLOG].domain
= 0x4; } while (0)
;
457 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,do { tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffIntExternal_t
); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].align = (1 <<
12); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].domain = 0x4; }
while (0)
458 sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,do { tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffIntExternal_t
); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].align = (1 <<
12); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].domain = 0x4; }
while (0)
459 AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffIntExternal_t
); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].align = (1 <<
12); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].domain = 0x4; }
while (0)
;
460 SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,do { tables[SMU_TABLE_COMBO_PPTABLE].size = 0x4000; tables[SMU_TABLE_COMBO_PPTABLE
].align = (1 << 12); tables[SMU_TABLE_COMBO_PPTABLE].domain
= 0x4; } while (0)
461 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_COMBO_PPTABLE].size = 0x4000; tables[SMU_TABLE_COMBO_PPTABLE
].align = (1 << 12); tables[SMU_TABLE_COMBO_PPTABLE].domain
= 0x4; } while (0)
;
462
463 smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL(0x0001 | 0x0004));
464 if (!smu_table->metrics_table)
465 goto err0_out;
466 smu_table->metrics_time = 0;
467
468 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
469 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL(0x0001 | 0x0004));
470 if (!smu_table->gpu_metrics_table)
471 goto err1_out;
472
473 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL(0x0001 | 0x0004));
474 if (!smu_table->watermarks_table)
475 goto err2_out;
476
477 return 0;
478
479err2_out:
480 kfree(smu_table->gpu_metrics_table);
481err1_out:
482 kfree(smu_table->metrics_table);
483err0_out:
484 return -ENOMEM12;
485}
486
487static int smu_v13_0_0_allocate_dpm_context(struct smu_context *smu)
488{
489 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
490
491 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
492 GFP_KERNEL(0x0001 | 0x0004));
493 if (!smu_dpm->dpm_context)
494 return -ENOMEM12;
495
496 smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
497
498 return 0;
499}
500
/*
 * Initialize all SMC tables: the ASIC-specific driver tables, the dpm
 * context, and finally the common smu v13.0 tables.  Stops at the first
 * failing step and propagates its error code.
 */
static int smu_v13_0_0_init_smc_tables(struct smu_context *smu)
{
	int ret;

	ret = smu_v13_0_0_tables_init(smu);
	if (!ret)
		ret = smu_v13_0_0_allocate_dpm_context(smu);
	if (!ret)
		ret = smu_v13_0_init_smc_tables(smu);

	return ret;
}
515
516static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
517{
518 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
519 struct smu_table_context *table_context = &smu->smu_table;
520 PPTable_t *pptable = table_context->driver_pptable;
521 SkuTable_t *skutable = &pptable->SkuTable;
522 struct smu_13_0_dpm_table *dpm_table;
523 struct smu_13_0_pcie_table *pcie_table;
524 uint32_t link_level;
525 int ret = 0;
526
527 /* socclk dpm table setup */
528 dpm_table = &dpm_context->dpm_tables.soc_table;
529 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
530 ret = smu_v13_0_set_single_dpm_table(smu,
531 SMU_SOCCLK,
532 dpm_table);
533 if (ret)
534 return ret;
535 } else {
536 dpm_table->count = 1;
537 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
538 dpm_table->dpm_levels[0].enabled = true1;
539 dpm_table->min = dpm_table->dpm_levels[0].value;
540 dpm_table->max = dpm_table->dpm_levels[0].value;
541 }
542
543 /* gfxclk dpm table setup */
544 dpm_table = &dpm_context->dpm_tables.gfx_table;
545 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
546 ret = smu_v13_0_set_single_dpm_table(smu,
547 SMU_GFXCLK,
548 dpm_table);
549 if (ret)
550 return ret;
551
552 /*
553 * Update the reported maximum shader clock to the value
554 * which can be guarded to be achieved on all cards. This
555 * is aligned with Window setting. And considering that value
556 * might be not the peak frequency the card can achieve, it
557 * is normal some real-time clock frequency can overtake this
558 * labelled maximum clock frequency(for example in pp_dpm_sclk
559 * sysfs output).
560 */
561 if (skutable->DriverReportedClocks.GameClockAc &&
562 (dpm_table->dpm_levels[dpm_table->count - 1].value >
563 skutable->DriverReportedClocks.GameClockAc)) {
564 dpm_table->dpm_levels[dpm_table->count - 1].value =
565 skutable->DriverReportedClocks.GameClockAc;
566 dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
567 }
568 } else {
569 dpm_table->count = 1;
570 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
571 dpm_table->dpm_levels[0].enabled = true1;
572 dpm_table->min = dpm_table->dpm_levels[0].value;
573 dpm_table->max = dpm_table->dpm_levels[0].value;
574 }
575
576 /* uclk dpm table setup */
577 dpm_table = &dpm_context->dpm_tables.uclk_table;
578 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
579 ret = smu_v13_0_set_single_dpm_table(smu,
580 SMU_UCLK,
581 dpm_table);
582 if (ret)
583 return ret;
584 } else {
585 dpm_table->count = 1;
586 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
587 dpm_table->dpm_levels[0].enabled = true1;
588 dpm_table->min = dpm_table->dpm_levels[0].value;
589 dpm_table->max = dpm_table->dpm_levels[0].value;
590 }
591
592 /* fclk dpm table setup */
593 dpm_table = &dpm_context->dpm_tables.fclk_table;
594 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
595 ret = smu_v13_0_set_single_dpm_table(smu,
596 SMU_FCLK,
597 dpm_table);
598 if (ret)
599 return ret;
600 } else {
601 dpm_table->count = 1;
602 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
603 dpm_table->dpm_levels[0].enabled = true1;
604 dpm_table->min = dpm_table->dpm_levels[0].value;
605 dpm_table->max = dpm_table->dpm_levels[0].value;
606 }
607
608 /* vclk dpm table setup */
609 dpm_table = &dpm_context->dpm_tables.vclk_table;
610 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
611 ret = smu_v13_0_set_single_dpm_table(smu,
612 SMU_VCLK,
613 dpm_table);
614 if (ret)
615 return ret;
616 } else {
617 dpm_table->count = 1;
618 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
619 dpm_table->dpm_levels[0].enabled = true1;
620 dpm_table->min = dpm_table->dpm_levels[0].value;
621 dpm_table->max = dpm_table->dpm_levels[0].value;
622 }
623
624 /* dclk dpm table setup */
625 dpm_table = &dpm_context->dpm_tables.dclk_table;
626 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
627 ret = smu_v13_0_set_single_dpm_table(smu,
628 SMU_DCLK,
629 dpm_table);
630 if (ret)
631 return ret;
632 } else {
633 dpm_table->count = 1;
634 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
635 dpm_table->dpm_levels[0].enabled = true1;
636 dpm_table->min = dpm_table->dpm_levels[0].value;
637 dpm_table->max = dpm_table->dpm_levels[0].value;
638 }
639
640 /* lclk dpm table setup */
641 pcie_table = &dpm_context->dpm_tables.pcie_table;
642 pcie_table->num_of_link_levels = 0;
643 for (link_level = 0; link_level < NUM_LINK_LEVELS3; link_level++) {
644 if (!skutable->PcieGenSpeed[link_level] &&
645 !skutable->PcieLaneCount[link_level] &&
646 !skutable->LclkFreq[link_level])
647 continue;
648
649 pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
650 skutable->PcieGenSpeed[link_level];
651 pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
652 skutable->PcieLaneCount[link_level];
653 pcie_table->clk_freq[pcie_table->num_of_link_levels] =
654 skutable->LclkFreq[link_level];
655 pcie_table->num_of_link_levels++;
656 }
657
658 return 0;
659}
660
661static bool_Bool smu_v13_0_0_is_dpm_running(struct smu_context *smu)
662{
663 int ret = 0;
664 uint64_t feature_enabled;
665
666 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
667 if (ret)
668 return false0;
669
670 return !!(feature_enabled & SMC_DPM_FEATURE( (1ULL << 1) | (1ULL << 3) | (1ULL << 7) |
(1ULL << 5) | (1ULL << 4) | (1ULL << 6))
);
671}
672
673static void smu_v13_0_0_dump_pptable(struct smu_context *smu)
674{
675 struct smu_table_context *table_context = &smu->smu_table;
676 PPTable_t *pptable = table_context->driver_pptable;
677 SkuTable_t *skutable = &pptable->SkuTable;
Value stored to 'skutable' during its initialization is never read
678
679 dev_info(smu->adev->dev, "Dumped PPTable:\n")do { } while(0);
680
681 dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version)do { } while(0);
682 dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0])do { } while(0);
683 dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1])do { } while(0);
684}
685
686static int smu_v13_0_0_system_features_control(struct smu_context *smu,
687 bool_Bool en)
688{
689 return smu_v13_0_system_features_control(smu, en);
690}
691
692static uint32_t smu_v13_0_get_throttler_status(SmuMetrics_t *metrics)
693{
694 uint32_t throttler_status = 0;
695 int i;
696
697 for (i = 0; i < THROTTLER_COUNT22; i++)
698 throttler_status |=
699 (metrics->ThrottlingPercentage[i] ? 1U << i : 0);
700
701 return throttler_status;
702}
703
704#define SMU_13_0_0_BUSY_THRESHOLD15 15
705static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
706 MetricsMember_t member,
707 uint32_t *value)
708{
709 struct smu_table_context *smu_table = &smu->smu_table;
710 SmuMetrics_t *metrics =
711 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
712 int ret = 0;
713
714 ret = smu_cmn_get_metrics_table(smu,
715 NULL((void *)0),
716 false0);
717 if (ret)
718 return ret;
719
720 switch (member) {
721 case METRICS_CURR_GFXCLK:
722 *value = metrics->CurrClock[PPCLK_GFXCLK];
723 break;
724 case METRICS_CURR_SOCCLK:
725 *value = metrics->CurrClock[PPCLK_SOCCLK];
726 break;
727 case METRICS_CURR_UCLK:
728 *value = metrics->CurrClock[PPCLK_UCLK];
729 break;
730 case METRICS_CURR_VCLK:
731 *value = metrics->CurrClock[PPCLK_VCLK_0];
732 break;
733 case METRICS_CURR_VCLK1:
734 *value = metrics->CurrClock[PPCLK_VCLK_1];
735 break;
736 case METRICS_CURR_DCLK:
737 *value = metrics->CurrClock[PPCLK_DCLK_0];
738 break;
739 case METRICS_CURR_DCLK1:
740 *value = metrics->CurrClock[PPCLK_DCLK_1];
741 break;
742 case METRICS_CURR_FCLK:
743 *value = metrics->CurrClock[PPCLK_FCLK];
744 break;
745 case METRICS_AVERAGE_GFXCLK:
746 if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD15)
747 *value = metrics->AverageGfxclkFrequencyPostDs;
748 else
749 *value = metrics->AverageGfxclkFrequencyPreDs;
750 break;
751 case METRICS_AVERAGE_FCLK:
752 if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD15)
753 *value = metrics->AverageFclkFrequencyPostDs;
754 else
755 *value = metrics->AverageFclkFrequencyPreDs;
756 break;
757 case METRICS_AVERAGE_UCLK:
758 if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD15)
759 *value = metrics->AverageMemclkFrequencyPostDs;
760 else
761 *value = metrics->AverageMemclkFrequencyPreDs;
762 break;
763 case METRICS_AVERAGE_VCLK:
764 *value = metrics->AverageVclk0Frequency;
765 break;
766 case METRICS_AVERAGE_DCLK:
767 *value = metrics->AverageDclk0Frequency;
768 break;
769 case METRICS_AVERAGE_VCLK1:
770 *value = metrics->AverageVclk1Frequency;
771 break;
772 case METRICS_AVERAGE_DCLK1:
773 *value = metrics->AverageDclk1Frequency;
774 break;
775 case METRICS_AVERAGE_GFXACTIVITY:
776 *value = metrics->AverageGfxActivity;
777 break;
778 case METRICS_AVERAGE_MEMACTIVITY:
779 *value = metrics->AverageUclkActivity;
780 break;
781 case METRICS_AVERAGE_SOCKETPOWER:
782 *value = metrics->AverageSocketPower << 8;
783 break;
784 case METRICS_TEMPERATURE_EDGE:
785 *value = metrics->AvgTemperature[TEMP_EDGE] *
786 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
787 break;
788 case METRICS_TEMPERATURE_HOTSPOT:
789 *value = metrics->AvgTemperature[TEMP_HOTSPOT] *
790 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
791 break;
792 case METRICS_TEMPERATURE_MEM:
793 *value = metrics->AvgTemperature[TEMP_MEM] *
794 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
795 break;
796 case METRICS_TEMPERATURE_VRGFX:
797 *value = metrics->AvgTemperature[TEMP_VR_GFX] *
798 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
799 break;
800 case METRICS_TEMPERATURE_VRSOC:
801 *value = metrics->AvgTemperature[TEMP_VR_SOC] *
802 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
803 break;
804 case METRICS_THROTTLER_STATUS:
805 *value = smu_v13_0_get_throttler_status(metrics);
806 break;
807 case METRICS_CURR_FANSPEED:
808 *value = metrics->AvgFanRpm;
809 break;
810 case METRICS_CURR_FANPWM:
811 *value = metrics->AvgFanPwm;
812 break;
813 case METRICS_VOLTAGE_VDDGFX:
814 *value = metrics->AvgVoltage[SVI_PLANE_GFX];
815 break;
816 case METRICS_PCIE_RATE:
817 *value = metrics->PcieRate;
818 break;
819 case METRICS_PCIE_WIDTH:
820 *value = metrics->PcieWidth;
821 break;
822 default:
823 *value = UINT_MAX0xffffffffU;
824 break;
825 }
826
827 return ret;
828}
829
830static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
831 enum smu_clk_type clk_type,
832 uint32_t *min,
833 uint32_t *max)
834{
835 struct smu_13_0_dpm_context *dpm_context =
836 smu->smu_dpm.dpm_context;
837 struct smu_13_0_dpm_table *dpm_table;
838
839 switch (clk_type) {
840 case SMU_MCLK:
841 case SMU_UCLK:
842 /* uclk dpm table */
843 dpm_table = &dpm_context->dpm_tables.uclk_table;
844 break;
845 case SMU_GFXCLK:
846 case SMU_SCLK:
847 /* gfxclk dpm table */
848 dpm_table = &dpm_context->dpm_tables.gfx_table;
849 break;
850 case SMU_SOCCLK:
851 /* socclk dpm table */
852 dpm_table = &dpm_context->dpm_tables.soc_table;
853 break;
854 case SMU_FCLK:
855 /* fclk dpm table */
856 dpm_table = &dpm_context->dpm_tables.fclk_table;
857 break;
858 case SMU_VCLK:
859 case SMU_VCLK1:
860 /* vclk dpm table */
861 dpm_table = &dpm_context->dpm_tables.vclk_table;
862 break;
863 case SMU_DCLK:
864 case SMU_DCLK1:
865 /* dclk dpm table */
866 dpm_table = &dpm_context->dpm_tables.dclk_table;
867 break;
868 default:
869 dev_err(smu->adev->dev, "Unsupported clock type!\n")printf("drm:pid%d:%s *ERROR* " "Unsupported clock type!\n", (
{struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
870 return -EINVAL22;
871 }
872
873 if (min)
874 *min = dpm_table->min;
875 if (max)
876 *max = dpm_table->max;
877
878 return 0;
879}
880
881static int smu_v13_0_0_read_sensor(struct smu_context *smu,
882 enum amd_pp_sensors sensor,
883 void *data,
884 uint32_t *size)
885{
886 struct smu_table_context *table_context = &smu->smu_table;
887 PPTable_t *smc_pptable = table_context->driver_pptable;
888 int ret = 0;
889
890 switch (sensor) {
891 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
892 *(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm;
893 *size = 4;
894 break;
895 case AMDGPU_PP_SENSOR_MEM_LOAD:
896 ret = smu_v13_0_0_get_smu_metrics_data(smu,
897 METRICS_AVERAGE_MEMACTIVITY,
898 (uint32_t *)data);
899 *size = 4;
900 break;
901 case AMDGPU_PP_SENSOR_GPU_LOAD:
902 ret = smu_v13_0_0_get_smu_metrics_data(smu,
903 METRICS_AVERAGE_GFXACTIVITY,
904 (uint32_t *)data);
905 *size = 4;
906 break;
907 case AMDGPU_PP_SENSOR_GPU_POWER:
908 ret = smu_v13_0_0_get_smu_metrics_data(smu,
909 METRICS_AVERAGE_SOCKETPOWER,
910 (uint32_t *)data);
911 *size = 4;
912 break;
913 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
914 ret = smu_v13_0_0_get_smu_metrics_data(smu,
915 METRICS_TEMPERATURE_HOTSPOT,
916 (uint32_t *)data);
917 *size = 4;
918 break;
919 case AMDGPU_PP_SENSOR_EDGE_TEMP:
920 ret = smu_v13_0_0_get_smu_metrics_data(smu,
921 METRICS_TEMPERATURE_EDGE,
922 (uint32_t *)data);
923 *size = 4;
924 break;
925 case AMDGPU_PP_SENSOR_MEM_TEMP:
926 ret = smu_v13_0_0_get_smu_metrics_data(smu,
927 METRICS_TEMPERATURE_MEM,
928 (uint32_t *)data);
929 *size = 4;
930 break;
931 case AMDGPU_PP_SENSOR_GFX_MCLK:
932 ret = smu_v13_0_0_get_smu_metrics_data(smu,
933 METRICS_CURR_UCLK,
934 (uint32_t *)data);
935 *(uint32_t *)data *= 100;
936 *size = 4;
937 break;
938 case AMDGPU_PP_SENSOR_GFX_SCLK:
939 ret = smu_v13_0_0_get_smu_metrics_data(smu,
940 METRICS_AVERAGE_GFXCLK,
941 (uint32_t *)data);
942 *(uint32_t *)data *= 100;
943 *size = 4;
944 break;
945 case AMDGPU_PP_SENSOR_VDDGFX:
946 ret = smu_v13_0_0_get_smu_metrics_data(smu,
947 METRICS_VOLTAGE_VDDGFX,
948 (uint32_t *)data);
949 *size = 4;
950 break;
951 default:
952 ret = -EOPNOTSUPP45;
953 break;
954 }
955
956 return ret;
957}
958
959static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu,
960 enum smu_clk_type clk_type,
961 uint32_t *value)
962{
963 MetricsMember_t member_type;
964 int clk_id = 0;
965
966 clk_id = smu_cmn_to_asic_specific_index(smu,
967 CMN2ASIC_MAPPING_CLK,
968 clk_type);
969 if (clk_id < 0)
970 return -EINVAL22;
971
972 switch (clk_id) {
973 case PPCLK_GFXCLK:
974 member_type = METRICS_AVERAGE_GFXCLK;
975 break;
976 case PPCLK_UCLK:
977 member_type = METRICS_CURR_UCLK;
978 break;
979 case PPCLK_FCLK:
980 member_type = METRICS_CURR_FCLK;
981 break;
982 case PPCLK_SOCCLK:
983 member_type = METRICS_CURR_SOCCLK;
984 break;
985 case PPCLK_VCLK_0:
986 member_type = METRICS_AVERAGE_VCLK;
987 break;
988 case PPCLK_DCLK_0:
989 member_type = METRICS_AVERAGE_DCLK;
990 break;
991 case PPCLK_VCLK_1:
992 member_type = METRICS_AVERAGE_VCLK1;
993 break;
994 case PPCLK_DCLK_1:
995 member_type = METRICS_AVERAGE_DCLK1;
996 break;
997 default:
998 return -EINVAL22;
999 }
1000
1001 return smu_v13_0_0_get_smu_metrics_data(smu,
1002 member_type,
1003 value);
1004}
1005
/*
 * Emit the dpm levels of @clk_type into the sysfs buffer @buf, marking
 * the level matching the current frequency with '*'.  For SMU_PCIE the
 * pcie link levels (gen speed, lane count, lclk) are printed instead.
 *
 * Returns the number of bytes written to @buf, or a negative error code.
 */
static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table;
	struct smu_13_0_pcie_table *pcie_table;
	/* pcie_lane[] values index into this lane-width table */
	const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
	uint32_t gen_speed, lane_width;
	int i, curr_freq, size = 0;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* clock reporting is unavailable while a RAS interrupt is pending */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	/* select the dpm table matching the requested clock domain;
	 * SMU_PCIE and unknown types fall through with it unset (the
	 * second switch never dereferences it for those cases) */
	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		ret = smu_v13_0_0_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			     (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						single_dpm_table->dpm_levels[0].value,
						single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						single_dpm_table->dpm_levels[1].value,
						single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			/* discrete dpm: print every level, starring the active one */
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						i, single_dpm_table->dpm_levels[i].value,
						single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v13_0_0_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		/* a level is current when both gen speed and lane width match;
		 * note gen_speed from metrics is 1-based vs the 0-based table */
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(pcie_table->pcie_lane[i] == 1) ? "x1" :
					(pcie_table->pcie_lane[i] == 2) ? "x2" :
					(pcie_table->pcie_lane[i] == 3) ? "x4" :
					(pcie_table->pcie_lane[i] == 4) ? "x8" :
					(pcie_table->pcie_lane[i] == 5) ? "x12" :
					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
					pcie_table->clk_freq[i],
					((gen_speed - 1) == pcie_table->pcie_gen[i]) &&
					(lane_width == link_width[pcie_table->pcie_lane[i]]) ?
					"*" : "");
		break;

	default:
		break;
	}

	return size;
}
1139
1140static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
1141 enum smu_clk_type clk_type,
1142 uint32_t mask)
1143{
1144 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1145 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1146 struct smu_13_0_dpm_table *single_dpm_table;
1147 uint32_t soft_min_level, soft_max_level;
1148 uint32_t min_freq, max_freq;
1149 int ret = 0;
1150
1151 soft_min_level = mask ? (ffs(mask) - 1) : 0;
1152 soft_max_level = mask ? (fls(mask) - 1) : 0;
1153
1154 switch (clk_type) {
1155 case SMU_GFXCLK:
1156 case SMU_SCLK:
1157 single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
1158 break;
1159 case SMU_MCLK:
1160 case SMU_UCLK:
1161 single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
1162 break;
1163 case SMU_SOCCLK:
1164 single_dpm_table = &(dpm_context->dpm_tables.soc_table);
1165 break;
1166 case SMU_FCLK:
1167 single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
1168 break;
1169 case SMU_VCLK:
1170 case SMU_VCLK1:
1171 single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
1172 break;
1173 case SMU_DCLK:
1174 case SMU_DCLK1:
1175 single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
1176 break;
1177 default:
1178 break;
1179 }
1180
1181 switch (clk_type) {
1182 case SMU_GFXCLK:
1183 case SMU_SCLK:
1184 case SMU_MCLK:
1185 case SMU_UCLK:
1186 case SMU_SOCCLK:
1187 case SMU_FCLK:
1188 case SMU_VCLK:
1189 case SMU_VCLK1:
1190 case SMU_DCLK:
1191 case SMU_DCLK1:
1192 if (single_dpm_table->is_fine_grained) {
1193 /* There is only 2 levels for fine grained DPM */
1194 soft_max_level = (soft_max_level >= 1 ? 1 : 0);
1195 soft_min_level = (soft_min_level >= 1 ? 1 : 0);
1196 } else {
1197 if ((soft_max_level >= single_dpm_table->count) ||
1198 (soft_min_level >= single_dpm_table->count))
1199 return -EINVAL22;
1200 }
1201
1202 min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
1203 max_freq = single_dpm_table->dpm_levels[soft_max_level].value;
1204
1205 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1206 clk_type,
1207 min_freq,
1208 max_freq);
1209 break;
1210 case SMU_DCEFCLK:
1211 case SMU_PCIE:
1212 default:
1213 break;
1214 }
1215
1216 return ret;
1217}
1218
/*
 * Default thermal limit template.  Entry [0] holds the baseline range
 * copied into smu_temperature_range before per-board pptable limits are
 * applied; entry [1] holds uniform upper bounds.  Values appear to be
 * millidegrees Celsius (99000 = 99C) -- TODO confirm against
 * struct smu_temperature_range consumers.
 */
static const struct smu_temperature_range smu13_thermal_policy[] = {
	{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
1223
1224static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
1225 struct smu_temperature_range *range)
1226{
1227 struct smu_table_context *table_context = &smu->smu_table;
1228 struct smu_13_0_0_powerplay_table *powerplay_table =
1229 table_context->power_play_table;
1230 PPTable_t *pptable = smu->smu_table.driver_pptable;
1231
1232 if (amdgpu_sriov_vf(smu->adev)((smu->adev)->virt.caps & (1 << 2)))
1233 return 0;
1234
1235 if (!range)
1236 return -EINVAL22;
1237
1238 memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range))__builtin_memcpy((range), (&smu13_thermal_policy[0]), (sizeof
(struct smu_temperature_range)))
;
1239
1240 range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] *
1241 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
1242 range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE5) *
1243 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
1244 range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] *
1245 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
1246 range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT5) *
1247 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
1248 range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] *
1249 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
1250 range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM5)*
1251 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
1252 range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
1253 range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
1254
1255 return 0;
1256}
1257
#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
/*
 * Populate the cached gpu_metrics_v1_3 table from a fresh PMFW metrics
 * snapshot and hand it back through @table.
 *
 * Returns sizeof(struct gpu_metrics_v1_3) on success, or the negative
 * error from the metrics fetch.
 */
static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetricsExternal_t metrics_ext;
	SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
	int ret = 0;

	/* bypass the cache (bypass_cache=true): always read fresh data */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics_ext,
					true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
	gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
	gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
	gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
	gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
	/* two VR MEM sensors exist; report the hotter one */
	gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0],
					     metrics->AvgTemperature[TEMP_VR_MEM1]);

	gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
	/* two VCN instances; report the busier one */
	gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage,
					       metrics->Vcn1ActivityPercentage);

	gpu_metrics->average_socket_power = metrics->AverageSocketPower;
	gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;

	/* when nearly idle, report the deep-sleep-adjusted averages */
	if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD)
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
	else
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;

	if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
	else
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;

	gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
	gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;

	gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
	gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
	gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];

	gpu_metrics->throttle_status =
			smu_v13_0_get_throttler_status(metrics);
	/* translate the ASIC bitmask into the generic throttler encoding */
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
							   smu_v13_0_0_throttler_map);

	gpu_metrics->current_fan_speed = metrics->AvgFanRpm;

	gpu_metrics->pcie_link_width = metrics->PcieWidth;
	gpu_metrics->pcie_link_speed = metrics->PcieRate;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX];
	gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC];
	gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP];

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
1339
1340static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
1341{
1342 struct smu_13_0_dpm_context *dpm_context =
1343 smu->smu_dpm.dpm_context;
1344 struct smu_13_0_dpm_table *gfx_table =
1345 &dpm_context->dpm_tables.gfx_table;
1346 struct smu_13_0_dpm_table *mem_table =
1347 &dpm_context->dpm_tables.uclk_table;
1348 struct smu_13_0_dpm_table *soc_table =
1349 &dpm_context->dpm_tables.soc_table;
1350 struct smu_13_0_dpm_table *vclk_table =
1351 &dpm_context->dpm_tables.vclk_table;
1352 struct smu_13_0_dpm_table *dclk_table =
1353 &dpm_context->dpm_tables.dclk_table;
1354 struct smu_13_0_dpm_table *fclk_table =
1355 &dpm_context->dpm_tables.fclk_table;
1356 struct smu_umd_pstate_table *pstate_table =
1357 &smu->pstate_table;
1358 struct smu_table_context *table_context = &smu->smu_table;
1359 PPTable_t *pptable = table_context->driver_pptable;
1360 DriverReportedClocks_t driver_clocks =
1361 pptable->SkuTable.DriverReportedClocks;
1362
1363 pstate_table->gfxclk_pstate.min = gfx_table->min;
1364 if (driver_clocks.GameClockAc &&
1365 (driver_clocks.GameClockAc < gfx_table->max))
1366 pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
1367 else
1368 pstate_table->gfxclk_pstate.peak = gfx_table->max;
1369
1370 pstate_table->uclk_pstate.min = mem_table->min;
1371 pstate_table->uclk_pstate.peak = mem_table->max;
1372
1373 pstate_table->socclk_pstate.min = soc_table->min;
1374 pstate_table->socclk_pstate.peak = soc_table->max;
1375
1376 pstate_table->vclk_pstate.min = vclk_table->min;
1377 pstate_table->vclk_pstate.peak = vclk_table->max;
1378
1379 pstate_table->dclk_pstate.min = dclk_table->min;
1380 pstate_table->dclk_pstate.peak = dclk_table->max;
1381
1382 pstate_table->fclk_pstate.min = fclk_table->min;
1383 pstate_table->fclk_pstate.peak = fclk_table->max;
1384
1385 if (driver_clocks.BaseClockAc &&
1386 driver_clocks.BaseClockAc < gfx_table->max)
1387 pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
1388 else
1389 pstate_table->gfxclk_pstate.standard = gfx_table->max;
1390 pstate_table->uclk_pstate.standard = mem_table->max;
1391 pstate_table->socclk_pstate.standard = soc_table->min;
1392 pstate_table->vclk_pstate.standard = vclk_table->min;
1393 pstate_table->dclk_pstate.standard = dclk_table->min;
1394 pstate_table->fclk_pstate.standard = fclk_table->min;
1395
1396 return 0;
1397}
1398
1399static void smu_v13_0_0_get_unique_id(struct smu_context *smu)
1400{
1401 struct smu_table_context *smu_table = &smu->smu_table;
1402 SmuMetrics_t *metrics =
1403 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
1404 struct amdgpu_device *adev = smu->adev;
1405 uint32_t upper32 = 0, lower32 = 0;
1406 int ret;
1407
1408 ret = smu_cmn_get_metrics_table(smu, NULL((void *)0), false0);
1409 if (ret)
1410 goto out;
1411
1412 upper32 = metrics->PublicSerialNumberUpper;
1413 lower32 = metrics->PublicSerialNumberLower;
1414
1415out:
1416 adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
1417 if (adev->serial[0] == '\0')
1418 snprintf(adev->serial, sizeof(adev->serial), "%016llx", adev->unique_id);
1419}
1420
1421static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
1422 uint32_t *speed)
1423{
1424 int ret;
1425
1426 if (!speed)
1427 return -EINVAL22;
1428
1429 ret = smu_v13_0_0_get_smu_metrics_data(smu,
1430 METRICS_CURR_FANPWM,
1431 speed);
1432 if (ret) {
1433 dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!")printf("drm:pid%d:%s *ERROR* " "Failed to get fan speed(PWM)!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1434 return ret;
1435 }
1436
1437 /* Convert the PMFW output which is in percent to pwm(255) based */
1438 *speed = MIN(*speed * 255 / 100, 255)(((*speed * 255 / 100)<(255))?(*speed * 255 / 100):(255));
1439
1440 return 0;
1441}
1442
1443static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu,
1444 uint32_t *speed)
1445{
1446 if (!speed)
1447 return -EINVAL22;
1448
1449 return smu_v13_0_0_get_smu_metrics_data(smu,
1450 METRICS_CURR_FANSPEED,
1451 speed);
1452}
1453
1454static int smu_v13_0_0_enable_mgpu_fan_boost(struct smu_context *smu)
1455{
1456 struct smu_table_context *table_context = &smu->smu_table;
1457 PPTable_t *pptable = table_context->driver_pptable;
1458 SkuTable_t *skutable = &pptable->SkuTable;
1459
1460 /*
1461 * Skip the MGpuFanBoost setting for those ASICs
1462 * which do not support it
1463 */
1464 if (skutable->MGpuAcousticLimitRpmThreshold == 0)
1465 return 0;
1466
1467 return smu_cmn_send_smc_msg_with_param(smu,
1468 SMU_MSG_SetMGpuFanBoostLimitRpm,
1469 0,
1470 NULL((void *)0));
1471}
1472
1473static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
1474 uint32_t *current_power_limit,
1475 uint32_t *default_power_limit,
1476 uint32_t *max_power_limit)
1477{
1478 struct smu_table_context *table_context = &smu->smu_table;
1479 struct smu_13_0_0_powerplay_table *powerplay_table =
1480 (struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
1481 PPTable_t *pptable = table_context->driver_pptable;
1482 SkuTable_t *skutable = &pptable->SkuTable;
1483 uint32_t power_limit, od_percent;
1484
1485 if (smu_v13_0_get_current_power_limit(smu, &power_limit))
1486 power_limit = smu->adev->pm.ac_power ?
1487 skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
1488 skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];
1489
1490 if (current_power_limit)
1491 *current_power_limit = power_limit;
1492 if (default_power_limit)
1493 *default_power_limit = power_limit;
1494
1495 if (max_power_limit) {
1496 if (smu->od_enabled) {
1497 od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE])((__uint32_t)(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE
]))
;
1498
1499 dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit)do { } while(0);
1500
1501 power_limit *= (100 + od_percent);
1502 power_limit /= 100;
1503 }
1504 *max_power_limit = power_limit;
1505 }
1506
1507 return 0;
1508}
1509
1510static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
1511 char *buf)
1512{
1513 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
1514 DpmActivityMonitorCoeffInt_t *activity_monitor =
1515 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
1516 static const char *title[] = {
1517 "PROFILE_INDEX(NAME)",
1518 "CLOCK_TYPE(NAME)",
1519 "FPS",
1520 "MinActiveFreqType",
1521 "MinActiveFreq",
1522 "BoosterFreqType",
1523 "BoosterFreq",
1524 "PD_Data_limit_c",
1525 "PD_Data_error_coeff",
1526 "PD_Data_error_rate_coeff"};
1527 int16_t workload_type = 0;
1528 uint32_t i, size = 0;
1529 int result = 0;
1530
1531 if (!buf)
1532 return -EINVAL22;
1533
1534 size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n",
1535 title[0], title[1], title[2], title[3], title[4], title[5],
1536 title[6], title[7], title[8], title[9]);
1537
1538 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
1539 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1540 workload_type = smu_cmn_to_asic_specific_index(smu,
1541 CMN2ASIC_MAPPING_WORKLOAD,
1542 i);
1543 if (workload_type < 0)
1544 return -EINVAL22;
1545
1546 result = smu_cmn_update_table(smu,
1547 SMU_TABLE_ACTIVITY_MONITOR_COEFF,
1548 workload_type,
1549 (void *)(&activity_monitor_external),
1550 false0);
1551 if (result) {
1552 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__)printf("drm:pid%d:%s *ERROR* " "[%s] Failed to get activity monitor!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
;
1553 return result;
1554 }
1555
1556 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
1557 i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
1558
1559 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
1560 " ",
1561 0,
1562 "GFXCLK",
1563 activity_monitor->Gfx_FPS,
1564 activity_monitor->Gfx_MinActiveFreqType,
1565 activity_monitor->Gfx_MinActiveFreq,
1566 activity_monitor->Gfx_BoosterFreqType,
1567 activity_monitor->Gfx_BoosterFreq,
1568 activity_monitor->Gfx_PD_Data_limit_c,
1569 activity_monitor->Gfx_PD_Data_error_coeff,
1570 activity_monitor->Gfx_PD_Data_error_rate_coeff);
1571
1572 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
1573 " ",
1574 1,
1575 "FCLK",
1576 activity_monitor->Fclk_FPS,
1577 activity_monitor->Fclk_MinActiveFreqType,
1578 activity_monitor->Fclk_MinActiveFreq,
1579 activity_monitor->Fclk_BoosterFreqType,
1580 activity_monitor->Fclk_BoosterFreq,
1581 activity_monitor->Fclk_PD_Data_limit_c,
1582 activity_monitor->Fclk_PD_Data_error_coeff,
1583 activity_monitor->Fclk_PD_Data_error_rate_coeff);
1584 }
1585
1586 return size;
1587}
1588
1589static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
1590 long *input,
1591 uint32_t size)
1592{
1593 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
1594 DpmActivityMonitorCoeffInt_t *activity_monitor =
1595 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
1596 int workload_type, ret = 0;
1597
1598 smu->power_profile_mode = input[size];
1599
1600 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1601 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode)printf("drm:pid%d:%s *ERROR* " "Invalid power profile mode %d\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , smu->
power_profile_mode)
;
1602 return -EINVAL22;
1603 }
1604
1605 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1606 ret = smu_cmn_update_table(smu,
1607 SMU_TABLE_ACTIVITY_MONITOR_COEFF,
1608 WORKLOAD_PPLIB_CUSTOM_BIT6,
1609 (void *)(&activity_monitor_external),
1610 false0);
1611 if (ret) {
1612 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__)printf("drm:pid%d:%s *ERROR* " "[%s] Failed to get activity monitor!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
;
1613 return ret;
1614 }
1615
1616 switch (input[0]) {
1617 case 0: /* Gfxclk */
1618 activity_monitor->Gfx_FPS = input[1];
1619 activity_monitor->Gfx_MinActiveFreqType = input[2];
1620 activity_monitor->Gfx_MinActiveFreq = input[3];
1621 activity_monitor->Gfx_BoosterFreqType = input[4];
1622 activity_monitor->Gfx_BoosterFreq = input[5];
1623 activity_monitor->Gfx_PD_Data_limit_c = input[6];
1624 activity_monitor->Gfx_PD_Data_error_coeff = input[7];
1625 activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
1626 break;
1627 case 1: /* Fclk */
1628 activity_monitor->Fclk_FPS = input[1];
1629 activity_monitor->Fclk_MinActiveFreqType = input[2];
1630 activity_monitor->Fclk_MinActiveFreq = input[3];
1631 activity_monitor->Fclk_BoosterFreqType = input[4];
1632 activity_monitor->Fclk_BoosterFreq = input[5];
1633 activity_monitor->Fclk_PD_Data_limit_c = input[6];
1634 activity_monitor->Fclk_PD_Data_error_coeff = input[7];
1635 activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
1636 break;
1637 }
1638
1639 ret = smu_cmn_update_table(smu,
1640 SMU_TABLE_ACTIVITY_MONITOR_COEFF,
1641 WORKLOAD_PPLIB_CUSTOM_BIT6,
1642 (void *)(&activity_monitor_external),
1643 true1);
1644 if (ret) {
1645 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__)printf("drm:pid%d:%s *ERROR* " "[%s] Failed to set activity monitor!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
;
1646 return ret;
1647 }
1648 }
1649
1650 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1651 workload_type = smu_cmn_to_asic_specific_index(smu,
1652 CMN2ASIC_MAPPING_WORKLOAD,
1653 smu->power_profile_mode);
1654
1655 if (workload_type < 0)
1656 return -EINVAL22;
1657
1658 return smu_cmn_send_smc_msg_with_param(smu,
1659 SMU_MSG_SetWorkloadMask,
1660 1 << workload_type,
1661 NULL((void *)0));
1662}
1663
1664static int smu_v13_0_0_baco_enter(struct smu_context *smu)
1665{
1666 struct smu_baco_context *smu_baco = &smu->smu_baco;
1667 struct amdgpu_device *adev = smu->adev;
1668
1669 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
1670 return smu_v13_0_baco_set_armd3_sequence(smu,
1671 smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
1672 else
1673 return smu_v13_0_baco_enter(smu);
1674}
1675
1676static int smu_v13_0_0_baco_exit(struct smu_context *smu)
1677{
1678 struct amdgpu_device *adev = smu->adev;
1679
1680 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
1681 /* Wait for PMFW handling for the Dstate change */
1682 usleep_range(10000, 11000);
1683 return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
1684 } else {
1685 return smu_v13_0_baco_exit(smu);
1686 }
1687}
1688
1689static bool_Bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
1690{
1691 struct amdgpu_device *adev = smu->adev;
1692 u32 smu_version;
1693
1694 /* SRIOV does not support SMU mode1 reset */
1695 if (amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2)))
1696 return false0;
1697
1698 /* PMFW support is available since 78.41 */
1699 smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
1700 if (smu_version < 0x004e2900)
1701 return false0;
1702
1703 return true1;
1704}
1705
1706static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
1707 struct i2c_msg *msg, int num_msgs)
1708{
1709 struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
1710 struct amdgpu_device *adev = smu_i2c->adev;
1711 struct smu_context *smu = adev->powerplay.pp_handle;
1712 struct smu_table_context *smu_table = &smu->smu_table;
1713 struct smu_table *table = &smu_table->driver_table;
1714 SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
1715 int i, j, r, c;
1716 u16 dir;
1717
1718 if (!adev->pm.dpm_enabled)
1719 return -EBUSY16;
1720
1721 req = kzalloc(sizeof(*req), GFP_KERNEL(0x0001 | 0x0004));
1722 if (!req)
1723 return -ENOMEM12;
1724
1725 req->I2CcontrollerPort = smu_i2c->port;
1726 req->I2CSpeed = I2C_SPEED_FAST_400K;
1727 req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
1728 dir = msg[0].flags & I2C_M_RD0x0001;
1729
1730 for (c = i = 0; i < num_msgs; i++) {
1731 for (j = 0; j < msg[i].len; j++, c++) {
1732 SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
1733
1734 if (!(msg[i].flags & I2C_M_RD0x0001)) {
1735 /* write */
1736 cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK(1 << 2);
1737 cmd->ReadWriteData = msg[i].buf[j];
1738 }
1739
1740 if ((dir ^ msg[i].flags) & I2C_M_RD0x0001) {
1741 /* The direction changes.
1742 */
1743 dir = msg[i].flags & I2C_M_RD0x0001;
1744 cmd->CmdConfig |= CMDCONFIG_RESTART_MASK(1 << 1);
1745 }
1746
1747 req->NumCmds++;
1748
1749 /*
1750 * Insert STOP if we are at the last byte of either last
1751 * message for the transaction or the client explicitly
1752 * requires a STOP at this particular message.
1753 */
1754 if ((j == msg[i].len - 1) &&
1755 ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP0x0004))) {
1756 cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK(1 << 1);
1757 cmd->CmdConfig |= CMDCONFIG_STOP_MASK(1 << 0);
1758 }
1759 }
1760 }
1761 mutex_lock(&adev->pm.mutex)rw_enter_write(&adev->pm.mutex);
1762 r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true1);
1763 mutex_unlock(&adev->pm.mutex)rw_exit_write(&adev->pm.mutex);
1764 if (r)
1765 goto fail;
1766
1767 for (c = i = 0; i < num_msgs; i++) {
1768 if (!(msg[i].flags & I2C_M_RD0x0001)) {
1769 c += msg[i].len;
1770 continue;
1771 }
1772 for (j = 0; j < msg[i].len; j++, c++) {
1773 SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
1774
1775 msg[i].buf[j] = cmd->ReadWriteData;
1776 }
1777 }
1778 r = num_msgs;
1779fail:
1780 kfree(req);
1781 return r;
1782}
1783
1784static u32 smu_v13_0_0_i2c_func(struct i2c_adapter *adap)
1785{
1786 return I2C_FUNC_I2C0 | I2C_FUNC_SMBUS_EMUL0;
1787}
1788
/* i2c algorithm backed by the PMFW SW-I2C engine. */
static const struct i2c_algorithm smu_v13_0_0_i2c_algo = {
	.master_xfer = smu_v13_0_0_i2c_xfer,
	.functionality = smu_v13_0_0_i2c_func,
};
1793
/*
 * Transfer-length limits of the PMFW SW-I2C engine: at most
 * MAX_SW_I2C_COMMANDS data bytes per request; combined transactions
 * reserve 2 bytes of the budget for the first (address/register) message.
 */
static const struct i2c_adapter_quirks smu_v13_0_0_i2c_control_quirks = {
	.flags = I2C_AQ_COMB0 | I2C_AQ_COMB_SAME_ADDR0 | I2C_AQ_NO_ZERO_LEN0,
	.max_read_len = MAX_SW_I2C_COMMANDS24,
	.max_write_len = MAX_SW_I2C_COMMANDS24,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS24 - 2,
};
1801
1802static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
1803{
1804 struct amdgpu_device *adev = smu->adev;
1805 int res, i;
1806
1807 for (i = 0; i < MAX_SMU_I2C_BUSES2; i++) {
1808 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
1809 struct i2c_adapter *control = &smu_i2c->adapter;
1810
1811 smu_i2c->adev = adev;
1812 smu_i2c->port = i;
1813 rw_init(&smu_i2c->mutex, "smu13iic")_rw_init_flags(&smu_i2c->mutex, "smu13iic", 0, ((void *
)0))
;
1814#ifdef __linux__
1815 control->owner = THIS_MODULE((void *)0);
1816 control->class = I2C_CLASS_SPD;
1817 control->dev.parent = &adev->pdev->dev;
1818#endif
1819 control->algo = &smu_v13_0_0_i2c_algo;
1820 snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
1821 control->quirks = &smu_v13_0_0_i2c_control_quirks;
1822 i2c_set_adapdata(control, smu_i2c);
1823
1824 res = i2c_add_adapter(control);
1825 if (res) {
1826 DRM_ERROR("Failed to register hw i2c, err: %d\n", res)__drm_err("Failed to register hw i2c, err: %d\n", res);
1827 goto Out_err;
1828 }
1829 }
1830
1831 /* assign the buses used for the FRU EEPROM and RAS EEPROM */
1832 /* XXX ideally this would be something in a vbios data table */
1833 adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
1834 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
1835
1836 return 0;
1837Out_err:
1838 for ( ; i >= 0; i--) {
1839 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
1840 struct i2c_adapter *control = &smu_i2c->adapter;
1841
1842 i2c_del_adapter(control);
1843 }
1844 return res;
1845}
1846
1847static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu)
1848{
1849 struct amdgpu_device *adev = smu->adev;
1850 int i;
1851
1852 for (i = 0; i < MAX_SMU_I2C_BUSES2; i++) {
1853 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
1854 struct i2c_adapter *control = &smu_i2c->adapter;
1855
1856 i2c_del_adapter(control);
1857 }
1858 adev->pm.ras_eeprom_i2c_bus = NULL((void *)0);
1859 adev->pm.fru_eeprom_i2c_bus = NULL((void *)0);
1860}
1861
1862static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
1863 enum pp_mp1_state mp1_state)
1864{
1865 int ret;
1866
1867 switch (mp1_state) {
1868 case PP_MP1_STATE_UNLOAD:
1869 ret = smu_cmn_set_mp1_state(smu, mp1_state);
1870 break;
1871 default:
1872 /* Ignore others */
1873 ret = 0;
1874 }
1875
1876 return ret;
1877}
1878
1879static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
1880 enum pp_df_cstate state)
1881{
1882 return smu_cmn_send_smc_msg_with_param(smu,
1883 SMU_MSG_DFCstateControl,
1884 state,
1885 NULL((void *)0));
1886}
1887
1888static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
1889{
1890 int ret;
1891 struct amdgpu_device *adev = smu->adev;
1892
1893 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)(((13) << 16) | ((0) << 8) | (10)))
1894 ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset2);
1895 else
1896 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL((void *)0));
1897
1898 if (!ret)
1899 drm_msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS)mdelay(500);
1900
1901 return ret;
1902}
1903
1904static void smu_v13_0_0_set_smu_mailbox_registers(struct smu_context *smu)
1905{
1906 struct amdgpu_device *adev = smu->adev;
1907
1908 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82)(adev->reg_offset[MP1_HWIP][0][0] + 0x0292);
1909 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66)(adev->reg_offset[MP1_HWIP][0][0] + 0x0282);
1910 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90)(adev->reg_offset[MP1_HWIP][0][0] + 0x029a);
1911
1912 smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_53)(adev->reg_offset[MP1_HWIP][0][0] + 0x0275);
1913 smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_75)(adev->reg_offset[MP1_HWIP][0][0] + 0x028b);
1914 smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_54)(adev->reg_offset[MP1_HWIP][0][0] + 0x0276);
1915}
1916
1917static int smu_v13_0_0_smu_send_bad_mem_page_num(struct smu_context *smu,
1918 uint32_t size)
1919{
1920 int ret = 0;
1921
1922 /* message SMU to update the bad page number on SMUBUS */
1923 ret = smu_cmn_send_smc_msg_with_param(smu,
1924 SMU_MSG_SetNumBadMemoryPagesRetired,
1925 size, NULL((void *)0));
1926 if (ret)
1927 dev_err(smu->adev->dev,printf("drm:pid%d:%s *ERROR* " "[%s] failed to message SMU to update bad memory pages number\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
1928 "[%s] failed to message SMU to update bad memory pages number\n",printf("drm:pid%d:%s *ERROR* " "[%s] failed to message SMU to update bad memory pages number\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
1929 __func__)printf("drm:pid%d:%s *ERROR* " "[%s] failed to message SMU to update bad memory pages number\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
;
1930
1931 return ret;
1932}
1933
1934static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
1935 uint32_t size)
1936{
1937 int ret = 0;
1938
1939 /* message SMU to update the bad channel info on SMUBUS */
1940 ret = smu_cmn_send_smc_msg_with_param(smu,
1941 SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,
1942 size, NULL((void *)0));
1943 if (ret)
1944 dev_err(smu->adev->dev,printf("drm:pid%d:%s *ERROR* " "[%s] failed to message SMU to update bad memory pages channel info\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
1945 "[%s] failed to message SMU to update bad memory pages channel info\n",printf("drm:pid%d:%s *ERROR* " "[%s] failed to message SMU to update bad memory pages channel info\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
1946 __func__)printf("drm:pid%d:%s *ERROR* " "[%s] failed to message SMU to update bad memory pages channel info\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
;
1947
1948 return ret;
1949}
1950
/*
 * pptable_funcs vtable for SMU 13.0.0: ASIC-specific hooks implemented in
 * this file (smu_v13_0_0_*) mixed with shared smu_v13_0_* / smu_cmn_*
 * implementations.
 */
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
	.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
	.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
	.i2c_init = smu_v13_0_0_i2c_control_init,
	.i2c_fini = smu_v13_0_0_i2c_control_fini,
	.is_dpm_running = smu_v13_0_0_is_dpm_running,
	.dump_pptable = smu_v13_0_0_dump_pptable,
	.init_microcode = smu_v13_0_init_microcode,
	.load_microcode = smu_v13_0_load_microcode,
	.fini_microcode = smu_v13_0_fini_microcode,
	.init_smc_tables = smu_v13_0_0_init_smc_tables,
	.fini_smc_tables = smu_v13_0_fini_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_check_fw_status,
	.setup_pptable = smu_v13_0_0_setup_pptable,
	.check_fw_version = smu_v13_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.system_features_control = smu_v13_0_0_system_features_control,
	.set_allowed_mask = smu_v13_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
	.get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.read_sensor = smu_v13_0_0_read_sensor,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.print_clk_levels = smu_v13_0_0_print_clk_levels,
	.force_clk_levels = smu_v13_0_0_force_clk_levels,
	.update_pcie_parameters = smu_v13_0_update_pcie_parameters,
	.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
	.register_irq_handler = smu_v13_0_register_irq_handler,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
	.get_gpu_metrics = smu_v13_0_0_get_gpu_metrics,
	.set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
	.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
	.populate_umd_state_clk = smu_v13_0_0_populate_umd_state_clk,
	.set_performance_level = smu_v13_0_set_performance_level,
	.gfx_off_control = smu_v13_0_gfx_off_control,
	.get_unique_id = smu_v13_0_0_get_unique_id,
	.get_fan_speed_pwm = smu_v13_0_0_get_fan_speed_pwm,
	.get_fan_speed_rpm = smu_v13_0_0_get_fan_speed_rpm,
	.set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm,
	.set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm,
	.get_fan_control_mode = smu_v13_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
	.enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
	.get_power_limit = smu_v13_0_0_get_power_limit,
	.set_power_limit = smu_v13_0_set_power_limit,
	.set_power_source = smu_v13_0_set_power_source,
	.get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
	.set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
	.run_btc = smu_v13_0_run_btc,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.deep_sleep_control = smu_v13_0_deep_sleep_control,
	.gfx_ulv_control = smu_v13_0_gfx_ulv_control,
	.baco_is_support = smu_v13_0_baco_is_support,
	.baco_get_state = smu_v13_0_baco_get_state,
	.baco_set_state = smu_v13_0_baco_set_state,
	.baco_enter = smu_v13_0_0_baco_enter,
	.baco_exit = smu_v13_0_0_baco_exit,
	.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
	.mode1_reset = smu_v13_0_0_mode1_reset,
	.set_mp1_state = smu_v13_0_0_set_mp1_state,
	.set_df_cstate = smu_v13_0_0_set_df_cstate,
	.send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
	.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
	.gpo_control = smu_v13_0_gpo_control,
};
2025
/*
 * Entry point for SMU 13.0.0: install the ppt vtable, the CMN2ASIC
 * translation maps, and the mailbox register offsets on the context.
 */
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v13_0_0_ppt_funcs;
	smu->message_map = smu_v13_0_0_message_map;
	smu->clock_map = smu_v13_0_0_clk_map;
	smu->feature_map = smu_v13_0_0_feature_mask_map;
	smu->table_map = smu_v13_0_0_table_map;
	smu->pwr_src_map = smu_v13_0_0_pwr_src_map;
	smu->workload_map = smu_v13_0_0_workload_map;
	smu_v13_0_0_set_smu_mailbox_registers(smu);
}