File: dev/pci/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
Analyzer warning: line 1594, column 13 — value stored to 'pptable' during its initialization is never read
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
23 | |
24 | #define SWSMU_CODE_LAYER_L2 |
25 | |
26 | #include <linux/firmware.h> |
27 | #include "amdgpu.h" |
28 | #include "amdgpu_dpm.h" |
29 | #include "amdgpu_smu.h" |
30 | #include "atomfirmware.h" |
31 | #include "amdgpu_atomfirmware.h" |
32 | #include "amdgpu_atombios.h" |
33 | #include "smu_v11_0.h" |
34 | #include "smu11_driver_if_arcturus.h" |
35 | #include "soc15_common.h" |
36 | #include "atom.h" |
37 | #include "arcturus_ppt.h" |
38 | #include "smu_v11_0_pptable.h" |
39 | #include "arcturus_ppsmc.h" |
40 | #include "nbio/nbio_7_4_offset.h" |
41 | #include "nbio/nbio_7_4_sh_mask.h" |
42 | #include "thm/thm_11_0_2_offset.h" |
43 | #include "thm/thm_11_0_2_sh_mask.h" |
44 | #include "amdgpu_xgmi.h" |
45 | #include <linux/i2c.h> |
46 | #include <linux/pci.h> |
47 | #include "amdgpu_ras.h" |
48 | #include "smu_cmn.h" |
49 | |
50 | /* |
51 | * DO NOT use these for err/warn/info/debug messages. |
52 | * Use dev_err, dev_warn, dev_info and dev_dbg instead. |
53 | * They are more MGPU friendly. |
54 | */ |
55 | #undef pr_err |
56 | #undef pr_warn |
57 | #undef pr_info |
58 | #undef pr_debug |
59 | |
60 | #define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature)[smu_feature] = {1, (arcturus_feature)} \ |
61 | [smu_feature] = {1, (arcturus_feature)} |
62 | |
63 | #define SMU_FEATURES_LOW_MASK0x00000000FFFFFFFF 0x00000000FFFFFFFF |
64 | #define SMU_FEATURES_LOW_SHIFT0 0 |
65 | #define SMU_FEATURES_HIGH_MASK0xFFFFFFFF00000000 0xFFFFFFFF00000000 |
66 | #define SMU_FEATURES_HIGH_SHIFT32 32 |
67 | |
68 | #define SMC_DPM_FEATURE( (1 << 0 ) | (1 << 1 ) | (1 << 2 ) | (1 << 3 ) | (1 << 5 ) | (1 << 4 ) | (1 << 6 )) ( \ |
69 | FEATURE_DPM_PREFETCHER_MASK(1 << 0 ) | \ |
70 | FEATURE_DPM_GFXCLK_MASK(1 << 1 ) | \ |
71 | FEATURE_DPM_UCLK_MASK(1 << 2 ) | \ |
72 | FEATURE_DPM_SOCCLK_MASK(1 << 3 ) | \ |
73 | FEATURE_DPM_MP0CLK_MASK(1 << 5 ) | \ |
74 | FEATURE_DPM_FCLK_MASK(1 << 4 ) | \ |
75 | FEATURE_DPM_XGMI_MASK(1 << 6 )) |
76 | |
77 | /* possible frequency drift (1Mhz) */ |
78 | #define EPSILON1 1 |
79 | |
80 | #define smnPCIE_ESM_CTRL0x111003D0 0x111003D0 |
81 | |
82 | #define mmCG_FDO_CTRL0_ARCT0x8B 0x8B |
83 | #define mmCG_FDO_CTRL0_ARCT_BASE_IDX0 0 |
84 | |
85 | #define mmCG_FDO_CTRL1_ARCT0x8C 0x8C |
86 | #define mmCG_FDO_CTRL1_ARCT_BASE_IDX0 0 |
87 | |
88 | #define mmCG_FDO_CTRL2_ARCT0x8D 0x8D |
89 | #define mmCG_FDO_CTRL2_ARCT_BASE_IDX0 0 |
90 | |
91 | #define mmCG_TACH_CTRL_ARCT0x8E 0x8E |
92 | #define mmCG_TACH_CTRL_ARCT_BASE_IDX0 0 |
93 | |
94 | #define mmCG_TACH_STATUS_ARCT0x8F 0x8F |
95 | #define mmCG_TACH_STATUS_ARCT_BASE_IDX0 0 |
96 | |
97 | #define mmCG_THERMAL_STATUS_ARCT0x90 0x90 |
98 | #define mmCG_THERMAL_STATUS_ARCT_BASE_IDX0 0 |
99 | |
100 | static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = { |
101 | MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0)[SMU_MSG_TestMessage] = {1, (0x1), (0)}, |
102 | MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1)[SMU_MSG_GetSmuVersion] = {1, (0x2), (1)}, |
103 | MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1)[SMU_MSG_GetDriverIfVersion] = {1, (0x3), (1)}, |
104 | MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0)[SMU_MSG_SetAllowedFeaturesMaskLow] = {1, (0x4), (0)}, |
105 | MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0)[SMU_MSG_SetAllowedFeaturesMaskHigh] = {1, (0x5), (0)}, |
106 | MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0)[SMU_MSG_EnableAllSmuFeatures] = {1, (0x6), (0)}, |
107 | MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0)[SMU_MSG_DisableAllSmuFeatures] = {1, (0x7), (0)}, |
108 | MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1)[SMU_MSG_EnableSmuFeaturesLow] = {1, (0x8), (1)}, |
109 | MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1)[SMU_MSG_EnableSmuFeaturesHigh] = {1, (0x9), (1)}, |
110 | MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 0)[SMU_MSG_DisableSmuFeaturesLow] = {1, (0xA), (0)}, |
111 | MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 0)[SMU_MSG_DisableSmuFeaturesHigh] = {1, (0xB), (0)}, |
112 | MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 0)[SMU_MSG_GetEnabledSmuFeaturesLow] = {1, (0xC), (0)}, |
113 | MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 0)[SMU_MSG_GetEnabledSmuFeaturesHigh] = {1, (0xD), (0)}, |
114 | MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1)[SMU_MSG_SetDriverDramAddrHigh] = {1, (0xE), (1)}, |
115 | MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1)[SMU_MSG_SetDriverDramAddrLow] = {1, (0xF), (1)}, |
116 | MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0)[SMU_MSG_SetToolsDramAddrHigh] = {1, (0x10), (0)}, |
117 | MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0)[SMU_MSG_SetToolsDramAddrLow] = {1, (0x11), (0)}, |
118 | MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1)[SMU_MSG_TransferTableSmu2Dram] = {1, (0x12), (1)}, |
119 | MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0)[SMU_MSG_TransferTableDram2Smu] = {1, (0x13), (0)}, |
120 | MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0)[SMU_MSG_UseDefaultPPTable] = {1, (0x14), (0)}, |
121 | MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable, 0)[SMU_MSG_UseBackupPPTable] = {1, (0x15), (0)}, |
122 | MSG_MAP(SetSystemVirtualDramAddrHigh, PPSMC_MSG_SetSystemVirtualDramAddrHigh, 0)[SMU_MSG_SetSystemVirtualDramAddrHigh] = {1, (0x16), (0)}, |
123 | MSG_MAP(SetSystemVirtualDramAddrLow, PPSMC_MSG_SetSystemVirtualDramAddrLow, 0)[SMU_MSG_SetSystemVirtualDramAddrLow] = {1, (0x17), (0)}, |
124 | MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0)[SMU_MSG_EnterBaco] = {1, (0x18), (0)}, |
125 | MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0)[SMU_MSG_ExitBaco] = {1, (0x19), (0)}, |
126 | MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0)[SMU_MSG_ArmD3] = {1, (0x1A), (0)}, |
127 | MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0)[SMU_MSG_SetSoftMinByFreq] = {1, (0x1B), (0)}, |
128 | MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0)[SMU_MSG_SetSoftMaxByFreq] = {1, (0x1C), (0)}, |
129 | MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0)[SMU_MSG_SetHardMinByFreq] = {1, (0x1D), (0)}, |
130 | MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0)[SMU_MSG_SetHardMaxByFreq] = {1, (0x1E), (0)}, |
131 | MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 0)[SMU_MSG_GetMinDpmFreq] = {1, (0x1F), (0)}, |
132 | MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 0)[SMU_MSG_GetMaxDpmFreq] = {1, (0x20), (0)}, |
133 | MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1)[SMU_MSG_GetDpmFreqByIndex] = {1, (0x21), (1)}, |
134 | MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1)[SMU_MSG_SetWorkloadMask] = {1, (0x22), (1)}, |
135 | MSG_MAP(SetDfSwitchType, PPSMC_MSG_SetDfSwitchType, 0)[SMU_MSG_SetDfSwitchType] = {1, (0x23), (0)}, |
136 | MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm, 0)[SMU_MSG_GetVoltageByDpm] = {1, (0x24), (0)}, |
137 | MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0)[SMU_MSG_GetVoltageByDpmOverdrive] = {1, (0x25), (0)}, |
138 | MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0)[SMU_MSG_SetPptLimit] = {1, (0x26), (0)}, |
139 | MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1)[SMU_MSG_GetPptLimit] = {1, (0x27), (1)}, |
140 | MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0, 0)[SMU_MSG_PowerUpVcn0] = {1, (0x28), (0)}, |
141 | MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0, 0)[SMU_MSG_PowerDownVcn0] = {1, (0x29), (0)}, |
142 | MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1, 0)[SMU_MSG_PowerUpVcn1] = {1, (0x2A), (0)}, |
143 | MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1, 0)[SMU_MSG_PowerDownVcn1] = {1, (0x2B), (0)}, |
144 | MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0)[SMU_MSG_PrepareMp1ForUnload] = {1, (0x2C), (0)}, |
145 | MSG_MAP(PrepareMp1ForReset, PPSMC_MSG_PrepareMp1ForReset, 0)[SMU_MSG_PrepareMp1ForReset] = {1, (0x2D), (0)}, |
146 | MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown, 0)[SMU_MSG_PrepareMp1ForShutdown] = {1, (0x2E), (0)}, |
147 | MSG_MAP(SoftReset, PPSMC_MSG_SoftReset, 0)[SMU_MSG_SoftReset] = {1, (0x2F), (0)}, |
148 | MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc, 0)[SMU_MSG_RunAfllBtc] = {1, (0x30), (0)}, |
149 | MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0)[SMU_MSG_RunDcBtc] = {1, (0x31), (0)}, |
150 | MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0)[SMU_MSG_DramLogSetDramAddrHigh] = {1, (0x33), (0)}, |
151 | MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0)[SMU_MSG_DramLogSetDramAddrLow] = {1, (0x34), (0)}, |
152 | MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0)[SMU_MSG_DramLogSetDramSize] = {1, (0x35), (0)}, |
153 | MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0)[SMU_MSG_GetDebugData] = {1, (0x36), (0)}, |
154 | MSG_MAP(WaflTest, PPSMC_MSG_WaflTest, 0)[SMU_MSG_WaflTest] = {1, (0x37), (0)}, |
155 | MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode, 0)[SMU_MSG_SetXgmiMode] = {1, (0x38), (0)}, |
156 | MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable, 0)[SMU_MSG_SetMemoryChannelEnable] = {1, (0x39), (0)}, |
157 | MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0)[SMU_MSG_DFCstateControl] = {1, (0x3B), (0)}, |
158 | MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0)[SMU_MSG_GmiPwrDnControl] = {1, (0x3D), (0)}, |
159 | MSG_MAP(ReadSerialNumTop32, PPSMC_MSG_ReadSerialNumTop32, 1)[SMU_MSG_ReadSerialNumTop32] = {1, (0x40), (1)}, |
160 | MSG_MAP(ReadSerialNumBottom32, PPSMC_MSG_ReadSerialNumBottom32, 1)[SMU_MSG_ReadSerialNumBottom32] = {1, (0x41), (1)}, |
161 | MSG_MAP(LightSBR, PPSMC_MSG_LightSBR, 0)[SMU_MSG_LightSBR] = {1, (0x42), (0)}, |
162 | }; |
163 | |
164 | static const struct cmn2asic_mapping arcturus_clk_map[SMU_CLK_COUNT] = { |
165 | CLK_MAP(GFXCLK, PPCLK_GFXCLK)[SMU_GFXCLK] = {1, (PPCLK_GFXCLK)}, |
166 | CLK_MAP(SCLK, PPCLK_GFXCLK)[SMU_SCLK] = {1, (PPCLK_GFXCLK)}, |
167 | CLK_MAP(SOCCLK, PPCLK_SOCCLK)[SMU_SOCCLK] = {1, (PPCLK_SOCCLK)}, |
168 | CLK_MAP(FCLK, PPCLK_FCLK)[SMU_FCLK] = {1, (PPCLK_FCLK)}, |
169 | CLK_MAP(UCLK, PPCLK_UCLK)[SMU_UCLK] = {1, (PPCLK_UCLK)}, |
170 | CLK_MAP(MCLK, PPCLK_UCLK)[SMU_MCLK] = {1, (PPCLK_UCLK)}, |
171 | CLK_MAP(DCLK, PPCLK_DCLK)[SMU_DCLK] = {1, (PPCLK_DCLK)}, |
172 | CLK_MAP(VCLK, PPCLK_VCLK)[SMU_VCLK] = {1, (PPCLK_VCLK)}, |
173 | }; |
174 | |
175 | static const struct cmn2asic_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = { |
176 | FEA_MAP(DPM_PREFETCHER)[SMU_FEATURE_DPM_PREFETCHER_BIT] = {1, 0}, |
177 | FEA_MAP(DPM_GFXCLK)[SMU_FEATURE_DPM_GFXCLK_BIT] = {1, 1}, |
178 | FEA_MAP(DPM_UCLK)[SMU_FEATURE_DPM_UCLK_BIT] = {1, 2}, |
179 | FEA_MAP(DPM_SOCCLK)[SMU_FEATURE_DPM_SOCCLK_BIT] = {1, 3}, |
180 | FEA_MAP(DPM_FCLK)[SMU_FEATURE_DPM_FCLK_BIT] = {1, 4}, |
181 | FEA_MAP(DPM_MP0CLK)[SMU_FEATURE_DPM_MP0CLK_BIT] = {1, 5}, |
182 | FEA_MAP(DPM_XGMI)[SMU_FEATURE_DPM_XGMI_BIT] = {1, 6}, |
183 | FEA_MAP(DS_GFXCLK)[SMU_FEATURE_DS_GFXCLK_BIT] = {1, 7}, |
184 | FEA_MAP(DS_SOCCLK)[SMU_FEATURE_DS_SOCCLK_BIT] = {1, 8}, |
185 | FEA_MAP(DS_LCLK)[SMU_FEATURE_DS_LCLK_BIT] = {1, 9}, |
186 | FEA_MAP(DS_FCLK)[SMU_FEATURE_DS_FCLK_BIT] = {1, 10}, |
187 | FEA_MAP(DS_UCLK)[SMU_FEATURE_DS_UCLK_BIT] = {1, 11}, |
188 | FEA_MAP(GFX_ULV)[SMU_FEATURE_GFX_ULV_BIT] = {1, 12}, |
189 | ARCTURUS_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN_BIT)[SMU_FEATURE_VCN_DPM_BIT] = {1, (13)}, |
190 | FEA_MAP(RSMU_SMN_CG)[SMU_FEATURE_RSMU_SMN_CG_BIT] = {1, 14}, |
191 | FEA_MAP(WAFL_CG)[SMU_FEATURE_WAFL_CG_BIT] = {1, 15}, |
192 | FEA_MAP(PPT)[SMU_FEATURE_PPT_BIT] = {1, 16}, |
193 | FEA_MAP(TDC)[SMU_FEATURE_TDC_BIT] = {1, 17}, |
194 | FEA_MAP(APCC_PLUS)[SMU_FEATURE_APCC_PLUS_BIT] = {1, 18}, |
195 | FEA_MAP(VR0HOT)[SMU_FEATURE_VR0HOT_BIT] = {1, 19}, |
196 | FEA_MAP(VR1HOT)[SMU_FEATURE_VR1HOT_BIT] = {1, 20}, |
197 | FEA_MAP(FW_CTF)[SMU_FEATURE_FW_CTF_BIT] = {1, 21}, |
198 | FEA_MAP(FAN_CONTROL)[SMU_FEATURE_FAN_CONTROL_BIT] = {1, 22}, |
199 | FEA_MAP(THERMAL)[SMU_FEATURE_THERMAL_BIT] = {1, 23}, |
200 | FEA_MAP(OUT_OF_BAND_MONITOR)[SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT] = {1, 24}, |
201 | FEA_MAP(TEMP_DEPENDENT_VMIN)[SMU_FEATURE_TEMP_DEPENDENT_VMIN_BIT] = {1, 25}, |
202 | }; |
203 | |
204 | static const struct cmn2asic_mapping arcturus_table_map[SMU_TABLE_COUNT] = { |
205 | TAB_MAP(PPTABLE)[SMU_TABLE_PPTABLE] = {1, 0}, |
206 | TAB_MAP(AVFS)[SMU_TABLE_AVFS] = {1, 1}, |
207 | TAB_MAP(AVFS_PSM_DEBUG)[SMU_TABLE_AVFS_PSM_DEBUG] = {1, 2}, |
208 | TAB_MAP(AVFS_FUSE_OVERRIDE)[SMU_TABLE_AVFS_FUSE_OVERRIDE] = {1, 3}, |
209 | TAB_MAP(PMSTATUSLOG)[SMU_TABLE_PMSTATUSLOG] = {1, 4}, |
210 | TAB_MAP(SMU_METRICS)[SMU_TABLE_SMU_METRICS] = {1, 5}, |
211 | TAB_MAP(DRIVER_SMU_CONFIG)[SMU_TABLE_DRIVER_SMU_CONFIG] = {1, 6}, |
212 | TAB_MAP(OVERDRIVE)[SMU_TABLE_OVERDRIVE] = {1, 7}, |
213 | TAB_MAP(I2C_COMMANDS)[SMU_TABLE_I2C_COMMANDS] = {1, 9}, |
214 | TAB_MAP(ACTIVITY_MONITOR_COEFF)[SMU_TABLE_ACTIVITY_MONITOR_COEFF] = {1, 10}, |
215 | }; |
216 | |
217 | static const struct cmn2asic_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { |
218 | PWR_MAP(AC)[SMU_POWER_SOURCE_AC] = {1, POWER_SOURCE_AC}, |
219 | PWR_MAP(DC)[SMU_POWER_SOURCE_DC] = {1, POWER_SOURCE_DC}, |
220 | }; |
221 | |
222 | static const struct cmn2asic_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { |
223 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT)[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = {1, (0)}, |
224 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT)[PP_SMC_POWER_PROFILE_POWERSAVING] = {1, (1)}, |
225 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT)[PP_SMC_POWER_PROFILE_VIDEO] = {1, (2)}, |
226 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT)[PP_SMC_POWER_PROFILE_COMPUTE] = {1, (3)}, |
227 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT)[PP_SMC_POWER_PROFILE_CUSTOM] = {1, (4)}, |
228 | }; |
229 | |
230 | static const uint8_t arcturus_throttler_map[] = { |
231 | [THROTTLER_TEMP_EDGE_BIT1] = (SMU_THROTTLER_TEMP_EDGE_BIT35), |
232 | [THROTTLER_TEMP_HOTSPOT_BIT2] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT36), |
233 | [THROTTLER_TEMP_MEM_BIT3] = (SMU_THROTTLER_TEMP_MEM_BIT34), |
234 | [THROTTLER_TEMP_VR_GFX_BIT4] = (SMU_THROTTLER_TEMP_VR_GFX_BIT38), |
235 | [THROTTLER_TEMP_VR_MEM_BIT5] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT40), |
236 | [THROTTLER_TEMP_VR_SOC_BIT6] = (SMU_THROTTLER_TEMP_VR_SOC_BIT39), |
237 | [THROTTLER_TDC_GFX_BIT7] = (SMU_THROTTLER_TDC_GFX_BIT16), |
238 | [THROTTLER_TDC_SOC_BIT8] = (SMU_THROTTLER_TDC_SOC_BIT17), |
239 | [THROTTLER_PPT0_BIT9] = (SMU_THROTTLER_PPT0_BIT0), |
240 | [THROTTLER_PPT1_BIT10] = (SMU_THROTTLER_PPT1_BIT1), |
241 | [THROTTLER_PPT2_BIT11] = (SMU_THROTTLER_PPT2_BIT2), |
242 | [THROTTLER_PPT3_BIT12] = (SMU_THROTTLER_PPT3_BIT3), |
243 | [THROTTLER_PPM_BIT13] = (SMU_THROTTLER_PPM_BIT56), |
244 | [THROTTLER_FIT_BIT14] = (SMU_THROTTLER_FIT_BIT57), |
245 | [THROTTLER_APCC_BIT15] = (SMU_THROTTLER_APCC_BIT23), |
246 | [THROTTLER_VRHOT0_BIT16] = (SMU_THROTTLER_VRHOT0_BIT44), |
247 | [THROTTLER_VRHOT1_BIT17] = (SMU_THROTTLER_VRHOT1_BIT45), |
248 | }; |
249 | |
250 | static int arcturus_tables_init(struct smu_context *smu) |
251 | { |
252 | struct smu_table_context *smu_table = &smu->smu_table; |
253 | struct smu_table *tables = smu_table->tables; |
254 | |
255 | SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),do { tables[SMU_TABLE_PPTABLE].size = sizeof(PPTable_t); tables [SMU_TABLE_PPTABLE].align = (1 << 12); tables[SMU_TABLE_PPTABLE ].domain = 0x4; } while (0) |
256 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_PPTABLE].size = sizeof(PPTable_t); tables [SMU_TABLE_PPTABLE].align = (1 << 12); tables[SMU_TABLE_PPTABLE ].domain = 0x4; } while (0); |
257 | |
258 | SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,do { tables[SMU_TABLE_PMSTATUSLOG].size = 0x19000; tables[SMU_TABLE_PMSTATUSLOG ].align = (1 << 12); tables[SMU_TABLE_PMSTATUSLOG].domain = 0x4; } while (0) |
259 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_PMSTATUSLOG].size = 0x19000; tables[SMU_TABLE_PMSTATUSLOG ].align = (1 << 12); tables[SMU_TABLE_PMSTATUSLOG].domain = 0x4; } while (0); |
260 | |
261 | SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),do { tables[SMU_TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t ); tables[SMU_TABLE_SMU_METRICS].align = (1 << 12); tables [SMU_TABLE_SMU_METRICS].domain = 0x4; } while (0) |
262 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t ); tables[SMU_TABLE_SMU_METRICS].align = (1 << 12); tables [SMU_TABLE_SMU_METRICS].domain = 0x4; } while (0); |
263 | |
264 | SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),do { tables[SMU_TABLE_I2C_COMMANDS].size = sizeof(SwI2cRequest_t ); tables[SMU_TABLE_I2C_COMMANDS].align = (1 << 12); tables [SMU_TABLE_I2C_COMMANDS].domain = 0x4; } while (0) |
265 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_I2C_COMMANDS].size = sizeof(SwI2cRequest_t ); tables[SMU_TABLE_I2C_COMMANDS].align = (1 << 12); tables [SMU_TABLE_I2C_COMMANDS].domain = 0x4; } while (0); |
266 | |
267 | SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,do { tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t ); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].align = (1 << 12); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].domain = 0x4; } while (0) |
268 | sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,do { tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t ); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].align = (1 << 12); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].domain = 0x4; } while (0) |
269 | AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t ); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].align = (1 << 12); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].domain = 0x4; } while (0); |
270 | |
271 | smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL(0x0001 | 0x0004)); |
272 | if (!smu_table->metrics_table) |
273 | return -ENOMEM12; |
274 | smu_table->metrics_time = 0; |
275 | |
276 | smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); |
277 | smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL(0x0001 | 0x0004)); |
278 | if (!smu_table->gpu_metrics_table) { |
279 | kfree(smu_table->metrics_table); |
280 | return -ENOMEM12; |
281 | } |
282 | |
283 | return 0; |
284 | } |
285 | |
286 | static int arcturus_allocate_dpm_context(struct smu_context *smu) |
287 | { |
288 | struct smu_dpm_context *smu_dpm = &smu->smu_dpm; |
289 | |
290 | smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context), |
291 | GFP_KERNEL(0x0001 | 0x0004)); |
292 | if (!smu_dpm->dpm_context) |
293 | return -ENOMEM12; |
294 | smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context); |
295 | |
296 | return 0; |
297 | } |
298 | |
/*
 * arcturus_init_smc_tables - set up table descriptors, DPM context and
 * the common SMU v11 table state.
 *
 * Returns 0 on success or the first failing step's error code.
 */
static int arcturus_init_smc_tables(struct smu_context *smu)
{
	int ret;

	ret = arcturus_tables_init(smu);
	if (ret)
		return ret;

	ret = arcturus_allocate_dpm_context(smu);
	if (ret)
		return ret;

	return smu_v11_0_init_smc_tables(smu);
}
313 | |
/*
 * arcturus_get_allowed_feature_mask - report the driver-allowed feature mask.
 *
 * All bits are set; the pptable is the authority on which features are
 * actually enabled.
 *
 * @feature_mask: output array of @num 32-bit words, filled with all-ones
 * @num: number of words; at most 2 (low + high feature dwords)
 *
 * Returns 0 on success, -EINVAL if @num exceeds 2.
 */
static int
arcturus_get_allowed_feature_mask(struct smu_context *smu,
				  uint32_t *feature_mask, uint32_t num)
{
	if (num > 2)
		return -EINVAL;

	/* pptable will handle the features to enable */
	memset(feature_mask, 0xFF, num * sizeof(*feature_mask));

	return 0;
}
326 | |
327 | static int arcturus_set_default_dpm_table(struct smu_context *smu) |
328 | { |
329 | struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; |
330 | PPTable_t *driver_ppt = smu->smu_table.driver_pptable; |
331 | struct smu_11_0_dpm_table *dpm_table = NULL((void *)0); |
332 | int ret = 0; |
333 | |
334 | /* socclk dpm table setup */ |
335 | dpm_table = &dpm_context->dpm_tables.soc_table; |
336 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { |
337 | ret = smu_v11_0_set_single_dpm_table(smu, |
338 | SMU_SOCCLK, |
339 | dpm_table); |
340 | if (ret) |
341 | return ret; |
342 | dpm_table->is_fine_grained = |
343 | !driver_ppt->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete; |
344 | } else { |
345 | dpm_table->count = 1; |
346 | dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; |
347 | dpm_table->dpm_levels[0].enabled = true1; |
348 | dpm_table->min = dpm_table->dpm_levels[0].value; |
349 | dpm_table->max = dpm_table->dpm_levels[0].value; |
350 | } |
351 | |
352 | /* gfxclk dpm table setup */ |
353 | dpm_table = &dpm_context->dpm_tables.gfx_table; |
354 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { |
355 | ret = smu_v11_0_set_single_dpm_table(smu, |
356 | SMU_GFXCLK, |
357 | dpm_table); |
358 | if (ret) |
359 | return ret; |
360 | dpm_table->is_fine_grained = |
361 | !driver_ppt->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete; |
362 | } else { |
363 | dpm_table->count = 1; |
364 | dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; |
365 | dpm_table->dpm_levels[0].enabled = true1; |
366 | dpm_table->min = dpm_table->dpm_levels[0].value; |
367 | dpm_table->max = dpm_table->dpm_levels[0].value; |
368 | } |
369 | |
370 | /* memclk dpm table setup */ |
371 | dpm_table = &dpm_context->dpm_tables.uclk_table; |
372 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { |
373 | ret = smu_v11_0_set_single_dpm_table(smu, |
374 | SMU_UCLK, |
375 | dpm_table); |
376 | if (ret) |
377 | return ret; |
378 | dpm_table->is_fine_grained = |
379 | !driver_ppt->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete; |
380 | } else { |
381 | dpm_table->count = 1; |
382 | dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; |
383 | dpm_table->dpm_levels[0].enabled = true1; |
384 | dpm_table->min = dpm_table->dpm_levels[0].value; |
385 | dpm_table->max = dpm_table->dpm_levels[0].value; |
386 | } |
387 | |
388 | /* fclk dpm table setup */ |
389 | dpm_table = &dpm_context->dpm_tables.fclk_table; |
390 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { |
391 | ret = smu_v11_0_set_single_dpm_table(smu, |
392 | SMU_FCLK, |
393 | dpm_table); |
394 | if (ret) |
395 | return ret; |
396 | dpm_table->is_fine_grained = |
397 | !driver_ppt->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete; |
398 | } else { |
399 | dpm_table->count = 1; |
400 | dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; |
401 | dpm_table->dpm_levels[0].enabled = true1; |
402 | dpm_table->min = dpm_table->dpm_levels[0].value; |
403 | dpm_table->max = dpm_table->dpm_levels[0].value; |
404 | } |
405 | |
406 | return 0; |
407 | } |
408 | |
409 | static void arcturus_check_bxco_support(struct smu_context *smu) |
410 | { |
411 | struct smu_table_context *table_context = &smu->smu_table; |
412 | struct smu_11_0_powerplay_table *powerplay_table = |
413 | table_context->power_play_table; |
414 | struct smu_baco_context *smu_baco = &smu->smu_baco; |
415 | struct amdgpu_device *adev = smu->adev; |
416 | uint32_t val; |
417 | |
418 | if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO0x8 || |
419 | powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO0x10) { |
420 | val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[NBIO_HWIP][0] [2] + 0x0000, 0, NBIO_HWIP) : amdgpu_device_rreg(adev, (adev-> reg_offset[NBIO_HWIP][0][2] + 0x0000), 0)); |
421 | smu_baco->platform_support = |
422 | (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK0x00000080L) ? true1 : |
423 | false0; |
424 | } |
425 | } |
426 | |
427 | static void arcturus_check_fan_support(struct smu_context *smu) |
428 | { |
429 | struct smu_table_context *table_context = &smu->smu_table; |
430 | PPTable_t *pptable = table_context->driver_pptable; |
431 | |
432 | /* No sort of fan control possible if PPTable has it disabled */ |
433 | smu->adev->pm.no_fan = |
434 | !(pptable->FeaturesToRun[0] & FEATURE_FAN_CONTROL_MASK(1 << 22 )); |
435 | if (smu->adev->pm.no_fan) |
436 | dev_info_once(smu->adev->dev,do { } while(0) |
437 | "PMFW based fan control disabled")do { } while(0); |
438 | } |
439 | |
440 | static int arcturus_check_powerplay_table(struct smu_context *smu) |
441 | { |
442 | struct smu_table_context *table_context = &smu->smu_table; |
443 | struct smu_11_0_powerplay_table *powerplay_table = |
444 | table_context->power_play_table; |
445 | |
446 | arcturus_check_bxco_support(smu); |
447 | arcturus_check_fan_support(smu); |
448 | |
449 | table_context->thermal_controller_type = |
450 | powerplay_table->thermal_controller_type; |
451 | |
452 | return 0; |
453 | } |
454 | |
455 | static int arcturus_store_powerplay_table(struct smu_context *smu) |
456 | { |
457 | struct smu_table_context *table_context = &smu->smu_table; |
458 | struct smu_11_0_powerplay_table *powerplay_table = |
459 | table_context->power_play_table; |
460 | |
461 | memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,__builtin_memcpy((table_context->driver_pptable), (&powerplay_table ->smc_pptable), (sizeof(PPTable_t))) |
462 | sizeof(PPTable_t))__builtin_memcpy((table_context->driver_pptable), (&powerplay_table ->smc_pptable), (sizeof(PPTable_t))); |
463 | |
464 | return 0; |
465 | } |
466 | |
467 | static int arcturus_append_powerplay_table(struct smu_context *smu) |
468 | { |
469 | struct smu_table_context *table_context = &smu->smu_table; |
470 | PPTable_t *smc_pptable = table_context->driver_pptable; |
471 | struct atom_smc_dpm_info_v4_6 *smc_dpm_table; |
472 | int index, ret; |
473 | |
474 | index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,(__builtin_offsetof(struct atom_master_list_of_data_tables_v2_1 , smc_dpm_info) / sizeof(uint16_t)) |
475 | smc_dpm_info)(__builtin_offsetof(struct atom_master_list_of_data_tables_v2_1 , smc_dpm_info) / sizeof(uint16_t)); |
476 | |
477 | ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL((void *)0), NULL((void *)0), NULL((void *)0), |
478 | (uint8_t **)&smc_dpm_table); |
479 | if (ret) |
480 | return ret; |
481 | |
482 | dev_info(smu->adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",do { } while(0) |
483 | smc_dpm_table->table_header.format_revision,do { } while(0) |
484 | smc_dpm_table->table_header.content_revision)do { } while(0); |
485 | |
486 | if ((smc_dpm_table->table_header.format_revision == 4) && |
487 | (smc_dpm_table->table_header.content_revision == 6)) |
488 | smu_memcpy_trailing(smc_pptable, MaxVoltageStepGfx, BoardReserved,({ size_t __src_offset = __builtin_offsetof(typeof(*(smc_dpm_table )), maxvoltagestepgfx); size_t __src_size = sizeof(*(smc_dpm_table )) - __src_offset; size_t __dst_offset = __builtin_offsetof(typeof (*(smc_pptable)), MaxVoltageStepGfx); size_t __dst_size = (__builtin_offsetof (typeof(*(smc_pptable)), BoardReserved) + sizeof((((typeof(*( smc_pptable)) *)0)->BoardReserved))) - __dst_offset; __builtin_memcpy ((u8 *)(smc_pptable) + __dst_offset, (u8 *)(smc_dpm_table) + __src_offset , __dst_size); }) |
489 | smc_dpm_table, maxvoltagestepgfx)({ size_t __src_offset = __builtin_offsetof(typeof(*(smc_dpm_table )), maxvoltagestepgfx); size_t __src_size = sizeof(*(smc_dpm_table )) - __src_offset; size_t __dst_offset = __builtin_offsetof(typeof (*(smc_pptable)), MaxVoltageStepGfx); size_t __dst_size = (__builtin_offsetof (typeof(*(smc_pptable)), BoardReserved) + sizeof((((typeof(*( smc_pptable)) *)0)->BoardReserved))) - __dst_offset; __builtin_memcpy ((u8 *)(smc_pptable) + __dst_offset, (u8 *)(smc_dpm_table) + __src_offset , __dst_size); }); |
490 | return 0; |
491 | } |
492 | |
/*
 * arcturus_setup_pptable - fetch, store, append and validate the pptable.
 *
 * Runs the common SMU v11 pptable setup, copies the SMC pptable into the
 * driver copy, overlays VBIOS board parameters, then derives runtime
 * capabilities from it.
 *
 * Returns 0 on success or the first failing step's error code.
 *
 * Fix vs. original: the trailing "if (ret) return ret; return ret;" was
 * redundant — the last step's result is now returned directly.
 */
static int arcturus_setup_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_v11_0_setup_pptable(smu);
	if (ret)
		return ret;

	ret = arcturus_store_powerplay_table(smu);
	if (ret)
		return ret;

	ret = arcturus_append_powerplay_table(smu);
	if (ret)
		return ret;

	return arcturus_check_powerplay_table(smu);
}
515 | |
516 | static int arcturus_run_btc(struct smu_context *smu) |
517 | { |
518 | int ret = 0; |
519 | |
520 | ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL((void *)0)); |
521 | if (ret) { |
522 | dev_err(smu->adev->dev, "RunAfllBtc failed!\n")printf("drm:pid%d:%s *ERROR* " "RunAfllBtc failed!\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci ) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci; })->ci_curproc->p_p->ps_pid, __func__); |
523 | return ret; |
524 | } |
525 | |
526 | return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL((void *)0)); |
527 | } |
528 | |
529 | static int arcturus_populate_umd_state_clk(struct smu_context *smu) |
530 | { |
531 | struct smu_11_0_dpm_context *dpm_context = |
532 | smu->smu_dpm.dpm_context; |
533 | struct smu_11_0_dpm_table *gfx_table = |
534 | &dpm_context->dpm_tables.gfx_table; |
535 | struct smu_11_0_dpm_table *mem_table = |
536 | &dpm_context->dpm_tables.uclk_table; |
537 | struct smu_11_0_dpm_table *soc_table = |
538 | &dpm_context->dpm_tables.soc_table; |
539 | struct smu_umd_pstate_table *pstate_table = |
540 | &smu->pstate_table; |
541 | |
542 | pstate_table->gfxclk_pstate.min = gfx_table->min; |
543 | pstate_table->gfxclk_pstate.peak = gfx_table->max; |
544 | |
545 | pstate_table->uclk_pstate.min = mem_table->min; |
546 | pstate_table->uclk_pstate.peak = mem_table->max; |
547 | |
548 | pstate_table->socclk_pstate.min = soc_table->min; |
549 | pstate_table->socclk_pstate.peak = soc_table->max; |
550 | |
551 | if (gfx_table->count > ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL0x3 && |
552 | mem_table->count > ARCTURUS_UMD_PSTATE_MCLK_LEVEL0x2 && |
553 | soc_table->count > ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL0x3) { |
554 | pstate_table->gfxclk_pstate.standard = |
555 | gfx_table->dpm_levels[ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL0x3].value; |
556 | pstate_table->uclk_pstate.standard = |
557 | mem_table->dpm_levels[ARCTURUS_UMD_PSTATE_MCLK_LEVEL0x2].value; |
558 | pstate_table->socclk_pstate.standard = |
559 | soc_table->dpm_levels[ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL0x3].value; |
560 | } else { |
561 | pstate_table->gfxclk_pstate.standard = |
562 | pstate_table->gfxclk_pstate.min; |
563 | pstate_table->uclk_pstate.standard = |
564 | pstate_table->uclk_pstate.min; |
565 | pstate_table->socclk_pstate.standard = |
566 | pstate_table->socclk_pstate.min; |
567 | } |
568 | |
569 | return 0; |
570 | } |
571 | |
572 | static int arcturus_get_clk_table(struct smu_context *smu, |
573 | struct pp_clock_levels_with_latency *clocks, |
574 | struct smu_11_0_dpm_table *dpm_table) |
575 | { |
576 | uint32_t i; |
577 | |
578 | clocks->num_levels = min_t(uint32_t,({ uint32_t __min_a = (dpm_table->count); uint32_t __min_b = ((uint32_t)16); __min_a < __min_b ? __min_a : __min_b; } ) |
579 | dpm_table->count,({ uint32_t __min_a = (dpm_table->count); uint32_t __min_b = ((uint32_t)16); __min_a < __min_b ? __min_a : __min_b; } ) |
580 | (uint32_t)PP_MAX_CLOCK_LEVELS)({ uint32_t __min_a = (dpm_table->count); uint32_t __min_b = ((uint32_t)16); __min_a < __min_b ? __min_a : __min_b; } ); |
581 | |
582 | for (i = 0; i < clocks->num_levels; i++) { |
583 | clocks->data[i].clocks_in_khz = |
584 | dpm_table->dpm_levels[i].value * 1000; |
585 | clocks->data[i].latency_in_us = 0; |
586 | } |
587 | |
588 | return 0; |
589 | } |
590 | |
/*
 * Two frequencies (in MHz) are considered to sit in the same DPM level
 * when they differ by no more than EPSILON (1 MHz). Returns non-zero
 * when they match.
 */
static int arcturus_freqs_in_same_level(int32_t frequency1,
					int32_t frequency2)
{
	int32_t delta = frequency1 - frequency2;

	if (delta < 0)
		delta = -delta;

	return delta <= 1 /* EPSILON */;
}
596 | |
597 | static int arcturus_get_smu_metrics_data(struct smu_context *smu, |
598 | MetricsMember_t member, |
599 | uint32_t *value) |
600 | { |
601 | struct smu_table_context *smu_table= &smu->smu_table; |
602 | SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; |
603 | int ret = 0; |
604 | |
605 | ret = smu_cmn_get_metrics_table(smu, |
606 | NULL((void *)0), |
607 | false0); |
608 | if (ret) |
609 | return ret; |
610 | |
611 | switch (member) { |
612 | case METRICS_CURR_GFXCLK: |
613 | *value = metrics->CurrClock[PPCLK_GFXCLK]; |
614 | break; |
615 | case METRICS_CURR_SOCCLK: |
616 | *value = metrics->CurrClock[PPCLK_SOCCLK]; |
617 | break; |
618 | case METRICS_CURR_UCLK: |
619 | *value = metrics->CurrClock[PPCLK_UCLK]; |
620 | break; |
621 | case METRICS_CURR_VCLK: |
622 | *value = metrics->CurrClock[PPCLK_VCLK]; |
623 | break; |
624 | case METRICS_CURR_DCLK: |
625 | *value = metrics->CurrClock[PPCLK_DCLK]; |
626 | break; |
627 | case METRICS_CURR_FCLK: |
628 | *value = metrics->CurrClock[PPCLK_FCLK]; |
629 | break; |
630 | case METRICS_AVERAGE_GFXCLK: |
631 | *value = metrics->AverageGfxclkFrequency; |
632 | break; |
633 | case METRICS_AVERAGE_SOCCLK: |
634 | *value = metrics->AverageSocclkFrequency; |
635 | break; |
636 | case METRICS_AVERAGE_UCLK: |
637 | *value = metrics->AverageUclkFrequency; |
638 | break; |
639 | case METRICS_AVERAGE_VCLK: |
640 | *value = metrics->AverageVclkFrequency; |
641 | break; |
642 | case METRICS_AVERAGE_DCLK: |
643 | *value = metrics->AverageDclkFrequency; |
644 | break; |
645 | case METRICS_AVERAGE_GFXACTIVITY: |
646 | *value = metrics->AverageGfxActivity; |
647 | break; |
648 | case METRICS_AVERAGE_MEMACTIVITY: |
649 | *value = metrics->AverageUclkActivity; |
650 | break; |
651 | case METRICS_AVERAGE_VCNACTIVITY: |
652 | *value = metrics->VcnActivityPercentage; |
653 | break; |
654 | case METRICS_AVERAGE_SOCKETPOWER: |
655 | *value = metrics->AverageSocketPower << 8; |
656 | break; |
657 | case METRICS_TEMPERATURE_EDGE: |
658 | *value = metrics->TemperatureEdge * |
659 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
660 | break; |
661 | case METRICS_TEMPERATURE_HOTSPOT: |
662 | *value = metrics->TemperatureHotspot * |
663 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
664 | break; |
665 | case METRICS_TEMPERATURE_MEM: |
666 | *value = metrics->TemperatureHBM * |
667 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
668 | break; |
669 | case METRICS_TEMPERATURE_VRGFX: |
670 | *value = metrics->TemperatureVrGfx * |
671 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
672 | break; |
673 | case METRICS_TEMPERATURE_VRSOC: |
674 | *value = metrics->TemperatureVrSoc * |
675 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
676 | break; |
677 | case METRICS_TEMPERATURE_VRMEM: |
678 | *value = metrics->TemperatureVrMem * |
679 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
680 | break; |
681 | case METRICS_THROTTLER_STATUS: |
682 | *value = metrics->ThrottlerStatus; |
683 | break; |
684 | case METRICS_CURR_FANSPEED: |
685 | *value = metrics->CurrFanSpeed; |
686 | break; |
687 | default: |
688 | *value = UINT_MAX0xffffffffU; |
689 | break; |
690 | } |
691 | |
692 | return ret; |
693 | } |
694 | |
695 | static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu, |
696 | enum smu_clk_type clk_type, |
697 | uint32_t *value) |
698 | { |
699 | MetricsMember_t member_type; |
700 | int clk_id = 0; |
701 | |
702 | if (!value) |
703 | return -EINVAL22; |
704 | |
705 | clk_id = smu_cmn_to_asic_specific_index(smu, |
706 | CMN2ASIC_MAPPING_CLK, |
707 | clk_type); |
708 | if (clk_id < 0) |
709 | return -EINVAL22; |
710 | |
711 | switch (clk_id) { |
712 | case PPCLK_GFXCLK: |
713 | /* |
714 | * CurrClock[clk_id] can provide accurate |
715 | * output only when the dpm feature is enabled. |
716 | * We can use Average_* for dpm disabled case. |
717 | * But this is available for gfxclk/uclk/socclk/vclk/dclk. |
718 | */ |
719 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) |
720 | member_type = METRICS_CURR_GFXCLK; |
721 | else |
722 | member_type = METRICS_AVERAGE_GFXCLK; |
723 | break; |
724 | case PPCLK_UCLK: |
725 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) |
726 | member_type = METRICS_CURR_UCLK; |
727 | else |
728 | member_type = METRICS_AVERAGE_UCLK; |
729 | break; |
730 | case PPCLK_SOCCLK: |
731 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) |
732 | member_type = METRICS_CURR_SOCCLK; |
733 | else |
734 | member_type = METRICS_AVERAGE_SOCCLK; |
735 | break; |
736 | case PPCLK_VCLK: |
737 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT)) |
738 | member_type = METRICS_CURR_VCLK; |
739 | else |
740 | member_type = METRICS_AVERAGE_VCLK; |
741 | break; |
742 | case PPCLK_DCLK: |
743 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT)) |
744 | member_type = METRICS_CURR_DCLK; |
745 | else |
746 | member_type = METRICS_AVERAGE_DCLK; |
747 | break; |
748 | case PPCLK_FCLK: |
749 | member_type = METRICS_CURR_FCLK; |
750 | break; |
751 | default: |
752 | return -EINVAL22; |
753 | } |
754 | |
755 | return arcturus_get_smu_metrics_data(smu, |
756 | member_type, |
757 | value); |
758 | } |
759 | |
760 | static int arcturus_print_clk_levels(struct smu_context *smu, |
761 | enum smu_clk_type type, char *buf) |
762 | { |
763 | int i, now, size = 0; |
764 | int ret = 0; |
765 | struct pp_clock_levels_with_latency clocks; |
766 | struct smu_11_0_dpm_table *single_dpm_table; |
767 | struct smu_dpm_context *smu_dpm = &smu->smu_dpm; |
768 | struct smu_11_0_dpm_context *dpm_context = NULL((void *)0); |
769 | uint32_t gen_speed, lane_width; |
770 | |
771 | smu_cmn_get_sysfs_buf(&buf, &size); |
772 | |
773 | if (amdgpu_ras_intr_triggered()) { |
774 | size += sysfs_emit_at(buf, size, "unavailable\n"); |
775 | return size; |
776 | } |
777 | |
778 | dpm_context = smu_dpm->dpm_context; |
779 | |
780 | switch (type) { |
781 | case SMU_SCLK: |
782 | ret = arcturus_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now); |
783 | if (ret) { |
784 | dev_err(smu->adev->dev, "Attempt to get current gfx clk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current gfx clk Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
785 | return ret; |
786 | } |
787 | |
788 | single_dpm_table = &(dpm_context->dpm_tables.gfx_table); |
789 | ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table); |
790 | if (ret) { |
791 | dev_err(smu->adev->dev, "Attempt to get gfx clk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get gfx clk levels Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
792 | return ret; |
793 | } |
794 | |
795 | /* |
796 | * For DPM disabled case, there will be only one clock level. |
797 | * And it's safe to assume that is always the current clock. |
798 | */ |
799 | for (i = 0; i < clocks.num_levels; i++) |
800 | size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, |
801 | clocks.data[i].clocks_in_khz / 1000, |
802 | (clocks.num_levels == 1) ? "*" : |
803 | (arcturus_freqs_in_same_level( |
804 | clocks.data[i].clocks_in_khz / 1000, |
805 | now) ? "*" : "")); |
806 | break; |
807 | |
808 | case SMU_MCLK: |
809 | ret = arcturus_get_current_clk_freq_by_table(smu, SMU_UCLK, &now); |
810 | if (ret) { |
811 | dev_err(smu->adev->dev, "Attempt to get current mclk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current mclk Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
812 | return ret; |
813 | } |
814 | |
815 | single_dpm_table = &(dpm_context->dpm_tables.uclk_table); |
816 | ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table); |
817 | if (ret) { |
818 | dev_err(smu->adev->dev, "Attempt to get memory clk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get memory clk levels Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
819 | return ret; |
820 | } |
821 | |
822 | for (i = 0; i < clocks.num_levels; i++) |
823 | size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", |
824 | i, clocks.data[i].clocks_in_khz / 1000, |
825 | (clocks.num_levels == 1) ? "*" : |
826 | (arcturus_freqs_in_same_level( |
827 | clocks.data[i].clocks_in_khz / 1000, |
828 | now) ? "*" : "")); |
829 | break; |
830 | |
831 | case SMU_SOCCLK: |
832 | ret = arcturus_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now); |
833 | if (ret) { |
834 | dev_err(smu->adev->dev, "Attempt to get current socclk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current socclk Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
835 | return ret; |
836 | } |
837 | |
838 | single_dpm_table = &(dpm_context->dpm_tables.soc_table); |
839 | ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table); |
840 | if (ret) { |
841 | dev_err(smu->adev->dev, "Attempt to get socclk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get socclk levels Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
842 | return ret; |
843 | } |
844 | |
845 | for (i = 0; i < clocks.num_levels; i++) |
846 | size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", |
847 | i, clocks.data[i].clocks_in_khz / 1000, |
848 | (clocks.num_levels == 1) ? "*" : |
849 | (arcturus_freqs_in_same_level( |
850 | clocks.data[i].clocks_in_khz / 1000, |
851 | now) ? "*" : "")); |
852 | break; |
853 | |
854 | case SMU_FCLK: |
855 | ret = arcturus_get_current_clk_freq_by_table(smu, SMU_FCLK, &now); |
856 | if (ret) { |
857 | dev_err(smu->adev->dev, "Attempt to get current fclk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current fclk Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
858 | return ret; |
859 | } |
860 | |
861 | single_dpm_table = &(dpm_context->dpm_tables.fclk_table); |
862 | ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table); |
863 | if (ret) { |
864 | dev_err(smu->adev->dev, "Attempt to get fclk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get fclk levels Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
865 | return ret; |
866 | } |
867 | |
868 | for (i = 0; i < single_dpm_table->count; i++) |
869 | size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", |
870 | i, single_dpm_table->dpm_levels[i].value, |
871 | (clocks.num_levels == 1) ? "*" : |
872 | (arcturus_freqs_in_same_level( |
873 | clocks.data[i].clocks_in_khz / 1000, |
874 | now) ? "*" : "")); |
875 | break; |
876 | |
877 | case SMU_VCLK: |
878 | ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now); |
879 | if (ret) { |
880 | dev_err(smu->adev->dev, "Attempt to get current vclk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current vclk Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
881 | return ret; |
882 | } |
883 | |
884 | single_dpm_table = &(dpm_context->dpm_tables.vclk_table); |
885 | ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table); |
886 | if (ret) { |
887 | dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get vclk levels Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
888 | return ret; |
889 | } |
890 | |
891 | for (i = 0; i < single_dpm_table->count; i++) |
892 | size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", |
893 | i, single_dpm_table->dpm_levels[i].value, |
894 | (clocks.num_levels == 1) ? "*" : |
895 | (arcturus_freqs_in_same_level( |
896 | clocks.data[i].clocks_in_khz / 1000, |
897 | now) ? "*" : "")); |
898 | break; |
899 | |
900 | case SMU_DCLK: |
901 | ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now); |
902 | if (ret) { |
903 | dev_err(smu->adev->dev, "Attempt to get current dclk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current dclk Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
904 | return ret; |
905 | } |
906 | |
907 | single_dpm_table = &(dpm_context->dpm_tables.dclk_table); |
908 | ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table); |
909 | if (ret) { |
910 | dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get dclk levels Failed!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
911 | return ret; |
912 | } |
913 | |
914 | for (i = 0; i < single_dpm_table->count; i++) |
915 | size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", |
916 | i, single_dpm_table->dpm_levels[i].value, |
917 | (clocks.num_levels == 1) ? "*" : |
918 | (arcturus_freqs_in_same_level( |
919 | clocks.data[i].clocks_in_khz / 1000, |
920 | now) ? "*" : "")); |
921 | break; |
922 | |
923 | case SMU_PCIE: |
924 | gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu); |
925 | lane_width = smu_v11_0_get_current_pcie_link_width_level(smu); |
926 | size += sysfs_emit_at(buf, size, "0: %s %s %dMhz *\n", |
927 | (gen_speed == 0) ? "2.5GT/s," : |
928 | (gen_speed == 1) ? "5.0GT/s," : |
929 | (gen_speed == 2) ? "8.0GT/s," : |
930 | (gen_speed == 3) ? "16.0GT/s," : "", |
931 | (lane_width == 1) ? "x1" : |
932 | (lane_width == 2) ? "x2" : |
933 | (lane_width == 3) ? "x4" : |
934 | (lane_width == 4) ? "x8" : |
935 | (lane_width == 5) ? "x12" : |
936 | (lane_width == 6) ? "x16" : "", |
937 | smu->smu_table.boot_values.lclk / 100); |
938 | break; |
939 | |
940 | default: |
941 | break; |
942 | } |
943 | |
944 | return size; |
945 | } |
946 | |
947 | static int arcturus_upload_dpm_level(struct smu_context *smu, |
948 | bool_Bool max, |
949 | uint32_t feature_mask, |
950 | uint32_t level) |
951 | { |
952 | struct smu_11_0_dpm_context *dpm_context = |
953 | smu->smu_dpm.dpm_context; |
954 | uint32_t freq; |
955 | int ret = 0; |
956 | |
957 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && |
958 | (feature_mask & FEATURE_DPM_GFXCLK_MASK(1 << 1 ))) { |
959 | freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value; |
960 | ret = smu_cmn_send_smc_msg_with_param(smu, |
961 | (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), |
962 | (PPCLK_GFXCLK << 16) | (freq & 0xffff), |
963 | NULL((void *)0)); |
964 | if (ret) { |
965 | dev_err(smu->adev->dev, "Failed to set soft %s gfxclk !\n",printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s gfxclk !\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max" : "min") |
966 | max ? "max" : "min")printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s gfxclk !\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max" : "min"); |
967 | return ret; |
968 | } |
969 | } |
970 | |
971 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && |
972 | (feature_mask & FEATURE_DPM_UCLK_MASK(1 << 2 ))) { |
973 | freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value; |
974 | ret = smu_cmn_send_smc_msg_with_param(smu, |
975 | (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), |
976 | (PPCLK_UCLK << 16) | (freq & 0xffff), |
977 | NULL((void *)0)); |
978 | if (ret) { |
979 | dev_err(smu->adev->dev, "Failed to set soft %s memclk !\n",printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s memclk !\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max" : "min") |
980 | max ? "max" : "min")printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s memclk !\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max" : "min"); |
981 | return ret; |
982 | } |
983 | } |
984 | |
985 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) && |
986 | (feature_mask & FEATURE_DPM_SOCCLK_MASK(1 << 3 ))) { |
987 | freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value; |
988 | ret = smu_cmn_send_smc_msg_with_param(smu, |
989 | (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), |
990 | (PPCLK_SOCCLK << 16) | (freq & 0xffff), |
991 | NULL((void *)0)); |
992 | if (ret) { |
993 | dev_err(smu->adev->dev, "Failed to set soft %s socclk !\n",printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s socclk !\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max" : "min") |
994 | max ? "max" : "min")printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s socclk !\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max" : "min"); |
995 | return ret; |
996 | } |
997 | } |
998 | |
999 | return ret; |
1000 | } |
1001 | |
1002 | static int arcturus_force_clk_levels(struct smu_context *smu, |
1003 | enum smu_clk_type type, uint32_t mask) |
1004 | { |
1005 | struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; |
1006 | struct smu_11_0_dpm_table *single_dpm_table = NULL((void *)0); |
1007 | uint32_t soft_min_level, soft_max_level; |
1008 | uint32_t smu_version; |
1009 | int ret = 0; |
1010 | |
1011 | ret = smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version); |
1012 | if (ret) { |
1013 | dev_err(smu->adev->dev, "Failed to get smu version!\n")printf("drm:pid%d:%s *ERROR* " "Failed to get smu version!\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
1014 | return ret; |
1015 | } |
1016 | |
1017 | if ((smu_version >= 0x361200) && |
1018 | (smu_version <= 0x361a00)) { |
1019 | dev_err(smu->adev->dev, "Forcing clock level is not supported with "printf("drm:pid%d:%s *ERROR* " "Forcing clock level is not supported with " "54.18 - 54.26(included) SMU firmwares\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__) |
1020 | "54.18 - 54.26(included) SMU firmwares\n")printf("drm:pid%d:%s *ERROR* " "Forcing clock level is not supported with " "54.18 - 54.26(included) SMU firmwares\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__); |
1021 | return -EOPNOTSUPP45; |
1022 | } |
1023 | |
1024 | soft_min_level = mask ? (ffs(mask) - 1) : 0; |
1025 | soft_max_level = mask ? (fls(mask) - 1) : 0; |
1026 | |
1027 | switch (type) { |
1028 | case SMU_SCLK: |
1029 | single_dpm_table = &(dpm_context->dpm_tables.gfx_table); |
1030 | if (soft_max_level >= single_dpm_table->count) { |
1031 | dev_err(smu->adev->dev, "Clock level specified %d is over max allowed %d\n",printf("drm:pid%d:%s *ERROR* " "Clock level specified %d is over max allowed %d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , soft_max_level , single_dpm_table->count - 1) |
1032 | soft_max_level, single_dpm_table->count - 1)printf("drm:pid%d:%s *ERROR* " "Clock level specified %d is over max allowed %d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , soft_max_level , single_dpm_table->count - 1); |
1033 | ret = -EINVAL22; |
1034 | break; |
1035 | } |
1036 | |
1037 | ret = arcturus_upload_dpm_level(smu, |
1038 | false0, |
1039 | FEATURE_DPM_GFXCLK_MASK(1 << 1 ), |
1040 | soft_min_level); |
1041 | if (ret) { |
1042 | dev_err(smu->adev->dev, "Failed to upload boot level to lowest!\n")printf("drm:pid%d:%s *ERROR* " "Failed to upload boot level to lowest!\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
1043 | break; |
1044 | } |
1045 | |
1046 | ret = arcturus_upload_dpm_level(smu, |
1047 | true1, |
1048 | FEATURE_DPM_GFXCLK_MASK(1 << 1 ), |
1049 | soft_max_level); |
1050 | if (ret) |
1051 | dev_err(smu->adev->dev, "Failed to upload dpm max level to highest!\n")printf("drm:pid%d:%s *ERROR* " "Failed to upload dpm max level to highest!\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
1052 | |
1053 | break; |
1054 | |
1055 | case SMU_MCLK: |
1056 | case SMU_SOCCLK: |
1057 | case SMU_FCLK: |
1058 | /* |
1059 | * Should not arrive here since Arcturus does not |
1060 | * support mclk/socclk/fclk softmin/softmax settings |
1061 | */ |
1062 | ret = -EINVAL22; |
1063 | break; |
1064 | |
1065 | default: |
1066 | break; |
1067 | } |
1068 | |
1069 | return ret; |
1070 | } |
1071 | |
1072 | static int arcturus_get_thermal_temperature_range(struct smu_context *smu, |
1073 | struct smu_temperature_range *range) |
1074 | { |
1075 | struct smu_table_context *table_context = &smu->smu_table; |
1076 | struct smu_11_0_powerplay_table *powerplay_table = |
1077 | table_context->power_play_table; |
1078 | PPTable_t *pptable = smu->smu_table.driver_pptable; |
1079 | |
1080 | if (!range) |
1081 | return -EINVAL22; |
1082 | |
1083 | memcpy(range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range))__builtin_memcpy((range), (&smu11_thermal_policy[0]), (sizeof (struct smu_temperature_range))); |
1084 | |
1085 | range->max = pptable->TedgeLimit * |
1086 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
1087 | range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE5) * |
1088 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
1089 | range->hotspot_crit_max = pptable->ThotspotLimit * |
1090 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
1091 | range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT5) * |
1092 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
1093 | range->mem_crit_max = pptable->TmemLimit * |
1094 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
1095 | range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM5)* |
1096 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000; |
1097 | range->software_shutdown_temp = powerplay_table->software_shutdown_temp; |
1098 | |
1099 | return 0; |
1100 | } |
1101 | |
1102 | static int arcturus_read_sensor(struct smu_context *smu, |
1103 | enum amd_pp_sensors sensor, |
1104 | void *data, uint32_t *size) |
1105 | { |
1106 | struct smu_table_context *table_context = &smu->smu_table; |
1107 | PPTable_t *pptable = table_context->driver_pptable; |
1108 | int ret = 0; |
1109 | |
1110 | if (amdgpu_ras_intr_triggered()) |
1111 | return 0; |
1112 | |
1113 | if (!data || !size) |
1114 | return -EINVAL22; |
1115 | |
1116 | switch (sensor) { |
1117 | case AMDGPU_PP_SENSOR_MAX_FAN_RPM: |
1118 | *(uint32_t *)data = pptable->FanMaximumRpm; |
1119 | *size = 4; |
1120 | break; |
1121 | case AMDGPU_PP_SENSOR_MEM_LOAD: |
1122 | ret = arcturus_get_smu_metrics_data(smu, |
1123 | METRICS_AVERAGE_MEMACTIVITY, |
1124 | (uint32_t *)data); |
1125 | *size = 4; |
1126 | break; |
1127 | case AMDGPU_PP_SENSOR_GPU_LOAD: |
1128 | ret = arcturus_get_smu_metrics_data(smu, |
1129 | METRICS_AVERAGE_GFXACTIVITY, |
1130 | (uint32_t *)data); |
1131 | *size = 4; |
1132 | break; |
1133 | case AMDGPU_PP_SENSOR_GPU_POWER: |
1134 | ret = arcturus_get_smu_metrics_data(smu, |
1135 | METRICS_AVERAGE_SOCKETPOWER, |
1136 | (uint32_t *)data); |
1137 | *size = 4; |
1138 | break; |
1139 | case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: |
1140 | ret = arcturus_get_smu_metrics_data(smu, |
1141 | METRICS_TEMPERATURE_HOTSPOT, |
1142 | (uint32_t *)data); |
1143 | *size = 4; |
1144 | break; |
1145 | case AMDGPU_PP_SENSOR_EDGE_TEMP: |
1146 | ret = arcturus_get_smu_metrics_data(smu, |
1147 | METRICS_TEMPERATURE_EDGE, |
1148 | (uint32_t *)data); |
1149 | *size = 4; |
1150 | break; |
1151 | case AMDGPU_PP_SENSOR_MEM_TEMP: |
1152 | ret = arcturus_get_smu_metrics_data(smu, |
1153 | METRICS_TEMPERATURE_MEM, |
1154 | (uint32_t *)data); |
1155 | *size = 4; |
1156 | break; |
1157 | case AMDGPU_PP_SENSOR_GFX_MCLK: |
1158 | ret = arcturus_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data); |
1159 | /* the output clock frequency in 10K unit */ |
1160 | *(uint32_t *)data *= 100; |
1161 | *size = 4; |
1162 | break; |
1163 | case AMDGPU_PP_SENSOR_GFX_SCLK: |
1164 | ret = arcturus_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data); |
1165 | *(uint32_t *)data *= 100; |
1166 | *size = 4; |
1167 | break; |
1168 | case AMDGPU_PP_SENSOR_VDDGFX: |
1169 | ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data); |
1170 | *size = 4; |
1171 | break; |
1172 | default: |
1173 | ret = -EOPNOTSUPP45; |
1174 | break; |
1175 | } |
1176 | |
1177 | return ret; |
1178 | } |
1179 | |
1180 | static int arcturus_set_fan_static_mode(struct smu_context *smu, |
1181 | uint32_t mode) |
1182 | { |
1183 | struct amdgpu_device *adev = smu->adev; |
1184 | |
1185 | WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8D), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x000000FFL) | (0x000000FFL & ((0) << 0x0))), 0, THM_HWIP ) : amdgpu_device_wreg(adev, ((adev->reg_offset[THM_HWIP][ 0][0] + 0x8D)), ((((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx .rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev ->reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x000000FFL) | (0x000000FFL & ((0) << 0x0)))), 0) ) |
1186 | REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT),((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8D), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x000000FFL) | (0x000000FFL & ((0) << 0x0))), 0, THM_HWIP ) : amdgpu_device_wreg(adev, ((adev->reg_offset[THM_HWIP][ 0][0] + 0x8D)), ((((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx .rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev ->reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x000000FFL) | (0x000000FFL & ((0) << 0x0)))), 0) ) |
1187 | CG_FDO_CTRL2, TMIN, 0))((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8D), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x000000FFL) | (0x000000FFL & ((0) << 0x0))), 0, THM_HWIP ) : amdgpu_device_wreg(adev, ((adev->reg_offset[THM_HWIP][ 0][0] + 0x8D)), ((((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx .rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev ->reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x000000FFL) | (0x000000FFL & ((0) << 0x0)))), 0) ); |
1188 | WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8D), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x00003800L) | (0x00003800L & ((mode) << 0xb))), 0 , THM_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[ THM_HWIP][0][0] + 0x8D)), ((((((((adev)->virt.caps & ( 1 << 2)) && adev->gfx.rlc.funcs && adev ->gfx.rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev , adev->reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x00003800L) | (0x00003800L & ((mode) << 0xb)))), 0)) |
1189 | REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2_ARCT),((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8D), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x00003800L) | (0x00003800L & ((mode) << 0xb))), 0 , THM_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[ THM_HWIP][0][0] + 0x8D)), ((((((((adev)->virt.caps & ( 1 << 2)) && adev->gfx.rlc.funcs && adev ->gfx.rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev , adev->reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x00003800L) | (0x00003800L & ((mode) << 0xb)))), 0)) |
1190 | CG_FDO_CTRL2, FDO_PWM_MODE, mode))((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8D), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x00003800L) | (0x00003800L & ((mode) << 0xb))), 0 , THM_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[ THM_HWIP][0][0] + 0x8D)), ((((((((adev)->virt.caps & ( 1 << 2)) && adev->gfx.rlc.funcs && adev ->gfx.rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev , adev->reg_offset[THM_HWIP][0][0] + 0x8D, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8D), 0))) & ~0x00003800L) | (0x00003800L & ((mode) << 0xb)))), 0)); |
1191 | |
1192 | return 0; |
1193 | } |
1194 | |
/*
 * arcturus_get_fan_speed_rpm - report the current fan speed in RPM.
 *
 * @smu:   SMU context (supplies the amdgpu device used for MMIO access)
 * @speed: out parameter receiving the fan speed in RPM
 *
 * In automatic fan-control mode the speed is read back from the SMU
 * metrics table; in any other mode it is derived from the tachometer
 * status register. Returns 0 on success, negative errno on failure.
 */
static int arcturus_get_fan_speed_rpm(struct smu_context *smu,
				       uint32_t *speed)
{
	struct amdgpu_device *adev = smu->adev;
	/* Reference clock for the RPM conversion: 25MHz, in 10KHz units. */
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_status;
	uint64_t tmp64;
	int ret = 0;

	if (!speed)
		return -EINVAL22;

	switch (smu_v11_0_get_fan_control_mode(smu)) {
	case AMD_FAN_CTRL_AUTO:
		/* SMU firmware tracks the fan speed itself in auto mode. */
		ret = arcturus_get_smu_metrics_data(smu,
						    METRICS_CURR_FANSPEED,
						    speed);
		break;
	default:
		/*
		 * For pre Sienna Cichlid ASICs, the 0 RPM may be not correctly
		 * detected via register retrieving. To workaround this, we will
		 * report the fan speed as 0 RPM if user just requested such.
		 */
		if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM(1 << 1))
		    && !smu->user_dpm_profile.fan_speed_rpm) {
			*speed = 0;
			return 0;
		}

		/* RPM = crystal_clock * 60s/min * 10000 / tach period. */
		tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
		tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS_ARCT)((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[THM_HWIP][0][ 0] + 0x8F, 0, THM_HWIP) : amdgpu_device_rreg(adev, (adev-> reg_offset[THM_HWIP][0][0] + 0x8F), 0));
		if (tach_status) {
			do_div(tmp64, tach_status)({ uint32_t __base = (tach_status); uint32_t __rem = ((uint64_t )(tmp64)) % __base; (tmp64) = ((uint64_t)(tmp64)) / __base; __rem ; });
			*speed = (uint32_t)tmp64;
		} else {
			/* Tachometer reads zero - treat the fan as stopped. */
			*speed = 0;
		}

		break;
	}

	return ret;
}
1239 | |
/*
 * arcturus_set_fan_speed_pwm - force a fixed fan duty cycle.
 *
 * @smu:   SMU context
 * @speed: requested PWM value on a 0-255 scale (clamped to 255)
 *
 * Scales the 0-255 request to the controller's FMAX_DUTY100 range,
 * programs the static duty register and switches the fan controller
 * into static-PWM mode. Returns 0 on success, negative errno on failure.
 */
static int arcturus_set_fan_speed_pwm(struct smu_context *smu,
				      uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	/* Clamp the request to the 8-bit PWM range. */
	speed = MIN(speed, 255)(((speed)<(255))?(speed):(255));

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1_ARCT),(((((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[THM_HWIP][0][ 0] + 0x8C, 0, THM_HWIP) : amdgpu_device_rreg(adev, (adev-> reg_offset[THM_HWIP][0][0] + 0x8C), 0))) & 0x000000FFL) >> 0x0)
				CG_FDO_CTRL1, FMAX_DUTY100)(((((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[THM_HWIP][0][ 0] + 0x8C, 0, THM_HWIP) : amdgpu_device_rreg(adev, (adev-> reg_offset[THM_HWIP][0][0] + 0x8C), 0))) & 0x000000FFL) >> 0x0);
	/* A zero FMAX_DUTY100 would make the scaling below divide by zero. */
	if (!duty100)
		return -EINVAL22;

	/* duty = speed * duty100 / 255, computed in 64 bits to avoid overflow. */
	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 255)({ uint32_t __base = (255); uint32_t __rem = ((uint64_t)(tmp64 )) % __base; (tmp64) = ((uint64_t)(tmp64)) / __base; __rem; } );
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0_ARCT,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8B), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8B, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8B), 0))) & ~0x000000FFL) | (0x000000FFL & ((duty) << 0x0))), 0 , THM_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[ THM_HWIP][0][0] + 0x8B)), ((((((((adev)->virt.caps & ( 1 << 2)) && adev->gfx.rlc.funcs && adev ->gfx.rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev , adev->reg_offset[THM_HWIP][0][0] + 0x8B, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8B), 0))) & ~0x000000FFL) | (0x000000FFL & ((duty) << 0x0)))), 0))
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0_ARCT),((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8B), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8B, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8B), 0))) & ~0x000000FFL) | (0x000000FFL & ((duty) << 0x0))), 0 , THM_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[ THM_HWIP][0][0] + 0x8B)), ((((((((adev)->virt.caps & ( 1 << 2)) && adev->gfx.rlc.funcs && adev ->gfx.rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev , adev->reg_offset[THM_HWIP][0][0] + 0x8B, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8B), 0))) & ~0x000000FFL) | (0x000000FFL & ((duty) << 0x0)))), 0))
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty))((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8B), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8B, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8B), 0))) & ~0x000000FFL) | (0x000000FFL & ((duty) << 0x0))), 0 , THM_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset[ THM_HWIP][0][0] + 0x8B)), ((((((((adev)->virt.caps & ( 1 << 2)) && adev->gfx.rlc.funcs && adev ->gfx.rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev , adev->reg_offset[THM_HWIP][0][0] + 0x8B, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8B), 0))) & ~0x000000FFL) | (0x000000FFL & ((duty) << 0x0)))), 0));

	return arcturus_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC1);
}
1264 | |
1265 | static int arcturus_set_fan_speed_rpm(struct smu_context *smu, |
1266 | uint32_t speed) |
1267 | { |
1268 | struct amdgpu_device *adev = smu->adev; |
1269 | /* |
1270 | * crystal_clock_freq used for fan speed rpm calculation is |
1271 | * always 25Mhz. So, hardcode it as 2500(in 10K unit). |
1272 | */ |
1273 | uint32_t crystal_clock_freq = 2500; |
1274 | uint32_t tach_period; |
1275 | |
1276 | tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); |
1277 | WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8E), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8E, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8E), 0))) & ~0xFFFFFFF8L) | (0xFFFFFFF8L & ((tach_period) << 0x3 ))), 0, THM_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [THM_HWIP][0][0] + 0x8E)), ((((((((adev)->virt.caps & ( 1 << 2)) && adev->gfx.rlc.funcs && adev ->gfx.rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev , adev->reg_offset[THM_HWIP][0][0] + 0x8E, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8E), 0))) & ~0xFFFFFFF8L) | (0xFFFFFFF8L & ((tach_period) << 0x3 )))), 0)) |
1278 | REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8E), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8E, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8E), 0))) & ~0xFFFFFFF8L) | (0xFFFFFFF8L & ((tach_period) << 0x3 ))), 0, THM_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [THM_HWIP][0][0] + 0x8E)), ((((((((adev)->virt.caps & ( 1 << 2)) && adev->gfx.rlc.funcs && adev ->gfx.rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev , adev->reg_offset[THM_HWIP][0][0] + 0x8E, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8E), 0))) & ~0xFFFFFFF8L) | (0xFFFFFFF8L & ((tach_period) << 0x3 )))), 0)) |
1279 | CG_TACH_CTRL, TARGET_PERIOD,((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8E), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8E, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8E), 0))) & ~0xFFFFFFF8L) | (0xFFFFFFF8L & ((tach_period) << 0x3 ))), 0, THM_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [THM_HWIP][0][0] + 0x8E)), ((((((((adev)->virt.caps & ( 1 << 2)) && adev->gfx.rlc.funcs && adev ->gfx.rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev , adev->reg_offset[THM_HWIP][0][0] + 0x8E, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8E), 0))) & ~0xFFFFFFF8L) | (0xFFFFFFF8L & ((tach_period) << 0x3 )))), 0)) |
1280 | tach_period))((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_wreg(adev, (adev->reg_offset[THM_HWIP][0] [0] + 0x8E), (((((((adev)->virt.caps & (1 << 2)) && adev->gfx.rlc.funcs && adev->gfx.rlc .rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev, adev-> reg_offset[THM_HWIP][0][0] + 0x8E, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8E), 0))) & ~0xFFFFFFF8L) | (0xFFFFFFF8L & ((tach_period) << 0x3 ))), 0, THM_HWIP) : amdgpu_device_wreg(adev, ((adev->reg_offset [THM_HWIP][0][0] + 0x8E)), ((((((((adev)->virt.caps & ( 1 << 2)) && adev->gfx.rlc.funcs && adev ->gfx.rlc.rlcg_reg_access_supported) ? amdgpu_sriov_rreg(adev , adev->reg_offset[THM_HWIP][0][0] + 0x8E, 0, THM_HWIP) : amdgpu_device_rreg (adev, (adev->reg_offset[THM_HWIP][0][0] + 0x8E), 0))) & ~0xFFFFFFF8L) | (0xFFFFFFF8L & ((tach_period) << 0x3 )))), 0)); |
1281 | |
1282 | return arcturus_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM5); |
1283 | } |
1284 | |
/*
 * arcturus_get_fan_speed_pwm - report the current fan duty on a 0-255 scale.
 *
 * @smu:   SMU context
 * @speed: out parameter receiving the PWM value (0-255)
 *
 * Reads the live FDO duty from the thermal status register and rescales
 * it from the controller's FMAX_DUTY100 range to 0-255. Returns 0.
 */
static int arcturus_get_fan_speed_pwm(struct smu_context *smu,
				      uint32_t *speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	/*
	 * For pre Sienna Cichlid ASICs, the 0 RPM may be not correctly
	 * detected via register retrieving. To workaround this, we will
	 * report the fan speed as 0 PWM if user just requested such.
	 */
	if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM(1 << 2))
	    && !smu->user_dpm_profile.fan_speed_pwm) {
		*speed = 0;
		return 0;
	}

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1_ARCT),(((((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[THM_HWIP][0][ 0] + 0x8C, 0, THM_HWIP) : amdgpu_device_rreg(adev, (adev-> reg_offset[THM_HWIP][0][0] + 0x8C), 0))) & 0x000000FFL) >> 0x0)
				CG_FDO_CTRL1, FMAX_DUTY100)(((((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[THM_HWIP][0][ 0] + 0x8C, 0, THM_HWIP) : amdgpu_device_rreg(adev, (adev-> reg_offset[THM_HWIP][0][0] + 0x8C), 0))) & 0x000000FFL) >> 0x0);
	duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS_ARCT),(((((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[THM_HWIP][0][ 0] + 0x90, 0, THM_HWIP) : amdgpu_device_rreg(adev, (adev-> reg_offset[THM_HWIP][0][0] + 0x90), 0))) & 0x0001FE00L) >> 0x9)
			     CG_THERMAL_STATUS, FDO_PWM_DUTY)(((((((adev)->virt.caps & (1 << 2)) && adev ->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported ) ? amdgpu_sriov_rreg(adev, adev->reg_offset[THM_HWIP][0][ 0] + 0x90, 0, THM_HWIP) : amdgpu_device_rreg(adev, (adev-> reg_offset[THM_HWIP][0][0] + 0x90), 0))) & 0x0001FE00L) >> 0x9);

	if (duty100) {
		/* speed = duty * 255 / duty100, done in 64 bits then clamped. */
		tmp64 = (uint64_t)duty * 255;
		do_div(tmp64, duty100)({ uint32_t __base = (duty100); uint32_t __rem = ((uint64_t)( tmp64)) % __base; (tmp64) = ((uint64_t)(tmp64)) / __base; __rem ; });
		*speed = MIN((uint32_t)tmp64, 255)((((uint32_t)tmp64)<(255))?((uint32_t)tmp64):(255));
	} else {
		/* FMAX_DUTY100 of zero means the scale is unknown. */
		*speed = 0;
	}

	return 0;
}
1318 | |
1319 | static int arcturus_get_fan_parameters(struct smu_context *smu) |
1320 | { |
1321 | PPTable_t *pptable = smu->smu_table.driver_pptable; |
1322 | |
1323 | smu->fan_max_rpm = pptable->FanMaximumRpm; |
1324 | |
1325 | return 0; |
1326 | } |
1327 | |
/*
 * arcturus_get_power_limit - retrieve current/default/max power limits.
 *
 * @smu:                 SMU context
 * @current_power_limit: optional out, current PPT limit in watts
 * @default_power_limit: optional out, default PPT limit (same value as
 *                       current here, since no separate default is kept)
 * @max_power_limit:     optional out, max limit including OD headroom
 *
 * Asks the SMU firmware for the live limit first and falls back to the
 * PPT0 AC socket power limit from the PPTable. Returns 0 on success or
 * a negative error code when no limit source is available.
 */
static int arcturus_get_power_limit(struct smu_context *smu,
					uint32_t *current_power_limit,
					uint32_t *default_power_limit,
					uint32_t *max_power_limit)
{
	struct smu_11_0_powerplay_table *powerplay_table =
		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	uint32_t power_limit, od_percent;

	if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
		/* the last hope to figure out the ppt limit */
		if (!pptable) {
			dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!")printf("drm:pid%d:%s *ERROR* " "Cannot get PPT limit due to pptable missing!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__);
			return -EINVAL22;
		}
		power_limit =
			pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
	}

	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

	if (max_power_limit) {
		if (smu->od_enabled) {
			/* Overdrive allows the limit to be raised by a percentage. */
			od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE])((__uint32_t)(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE ]));

			dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit)do { } while(0);

			power_limit *= (100 + od_percent);
			power_limit /= 100;
		}

		*max_power_limit = power_limit;
	}

	return 0;
}
1368 | |
/*
 * arcturus_get_power_profile_mode - print the power-profile mode table.
 *
 * @smu: SMU context
 * @buf: sysfs buffer the table is emitted into; must be non-NULL
 *
 * Lists every power profile supported by this ASIC, marking the active
 * one with '*'. On SMU firmware >= 54.13 (0x360d00) the per-profile
 * activity-monitor coefficients for GFXCLK and UCLK are printed too.
 * Returns the number of bytes written, or a negative error code.
 */
static int arcturus_get_power_profile_mode(struct smu_context *smu,
					   char *buf)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"UseRlcBusy",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	uint32_t i, size = 0;
	int16_t workload_type = 0;
	int result = 0;
	uint32_t smu_version;

	if (!buf)
		return -EINVAL22;

	result = smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
	if (result)
		return result;

	/* Full header only when activity-monitor data is available. */
	if (smu_version >= 0x360d00)
		size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9], title[10]);
	else
		size += sysfs_emit_at(buf, size, "%16s\n",
			title[0]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/*
		 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
		 * Not all profile modes are supported on arcturus.
		 */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		if (workload_type < 0)
			continue;

		if (smu_version >= 0x360d00) {
			/* Pull this profile's coefficients from the SMU table. */
			result = smu_cmn_update_table(smu,
						      SMU_TABLE_ACTIVITY_MONITOR_COEFF,
						      workload_type,
						      (void *)(&activity_monitor),
						      false0);
			if (result) {
				dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__)printf("drm:pid%d:%s *ERROR* " "[%s] Failed to get activity monitor!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__ );
				return result;
			}
		}

		size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		if (smu_version >= 0x360d00) {
			size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
				" ",
				0,
				"GFXCLK",
				activity_monitor.Gfx_FPS,
				activity_monitor.Gfx_UseRlcBusy,
				activity_monitor.Gfx_MinActiveFreqType,
				activity_monitor.Gfx_MinActiveFreq,
				activity_monitor.Gfx_BoosterFreqType,
				activity_monitor.Gfx_BoosterFreq,
				activity_monitor.Gfx_PD_Data_limit_c,
				activity_monitor.Gfx_PD_Data_error_coeff,
				activity_monitor.Gfx_PD_Data_error_rate_coeff);

			size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
				" ",
				1,
				"UCLK",
				activity_monitor.Mem_FPS,
				activity_monitor.Mem_UseRlcBusy,
				activity_monitor.Mem_MinActiveFreqType,
				activity_monitor.Mem_MinActiveFreq,
				activity_monitor.Mem_BoosterFreqType,
				activity_monitor.Mem_BoosterFreq,
				activity_monitor.Mem_PD_Data_limit_c,
				activity_monitor.Mem_PD_Data_error_coeff,
				activity_monitor.Mem_PD_Data_error_rate_coeff);
		}
	}

	return size;
}
1464 | |
/*
 * arcturus_set_power_profile_mode - select a power profile.
 *
 * @smu:   SMU context
 * @input: parameter array; for CUSTOM mode input[0] selects the clock
 *         (0 = GFXCLK, 1 = UCLK) and input[1..9] carry the activity
 *         monitor coefficients; input[size] holds the profile index
 * @size:  index of the profile-mode entry within @input
 *
 * For the CUSTOM profile (on SMU firmware >= 54.13) the caller-supplied
 * activity-monitor coefficients are written back to the SMU first.
 * Then the profile is mapped to the ASIC workload bit and committed via
 * SetWorkloadMask. Returns 0 on success or a negative error code.
 */
static int arcturus_set_power_profile_mode(struct smu_context *smu,
					   long *input,
					   uint32_t size)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	int workload_type = 0;
	uint32_t profile_mode = input[size];
	int ret = 0;
	uint32_t smu_version;

	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode)printf("drm:pid%d:%s *ERROR* " "Invalid power profile mode %d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , profile_mode );
		return -EINVAL22;
	}

	ret = smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
	if (ret)
		return ret;

	if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
	     (smu_version >=0x360d00)) {
		/* Read-modify-write the custom activity monitor coefficients. */
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					   WORKLOAD_PPLIB_CUSTOM_BIT4,
					   (void *)(&activity_monitor),
					   false0);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__)printf("drm:pid%d:%s *ERROR* " "[%s] Failed to get activity monitor!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__ );
			return ret;
		}

		switch (input[0]) {
		case 0: /* Gfxclk */
			activity_monitor.Gfx_FPS = input[1];
			activity_monitor.Gfx_UseRlcBusy = input[2];
			activity_monitor.Gfx_MinActiveFreqType = input[3];
			activity_monitor.Gfx_MinActiveFreq = input[4];
			activity_monitor.Gfx_BoosterFreqType = input[5];
			activity_monitor.Gfx_BoosterFreq = input[6];
			activity_monitor.Gfx_PD_Data_limit_c = input[7];
			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
			break;
		case 1: /* Uclk */
			activity_monitor.Mem_FPS = input[1];
			activity_monitor.Mem_UseRlcBusy = input[2];
			activity_monitor.Mem_MinActiveFreqType = input[3];
			activity_monitor.Mem_MinActiveFreq = input[4];
			activity_monitor.Mem_BoosterFreqType = input[5];
			activity_monitor.Mem_BoosterFreq = input[6];
			activity_monitor.Mem_PD_Data_limit_c = input[7];
			activity_monitor.Mem_PD_Data_error_coeff = input[8];
			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
			break;
		}

		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					   WORKLOAD_PPLIB_CUSTOM_BIT4,
					   (void *)(&activity_monitor),
					   true1);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__)printf("drm:pid%d:%s *ERROR* " "[%s] Failed to set activity monitor!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__ );
			return ret;
		}
	}

	/*
	 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
	 * Not all profile modes are supported on arcturus.
	 */
	workload_type = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_WORKLOAD,
						       profile_mode);
	if (workload_type < 0) {
		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode)do { } while(0);
		return -EINVAL22;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_SetWorkloadMask,
					  1 << workload_type,
					  NULL((void *)0));
	if (ret) {
		dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type)printf("drm:pid%d:%s *ERROR* " "Fail to set workload type %d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , workload_type );
		return ret;
	}

	/* Remember the committed mode for get_power_profile_mode(). */
	smu->power_profile_mode = profile_mode;

	return 0;
}
1557 | |
/*
 * arcturus_set_performance_level - apply a DPM forced performance level.
 *
 * @smu:   SMU context
 * @level: requested amd_dpm_forced_level
 *
 * Clock-level forcing is broken on SMU firmwares 54.18 through 54.26
 * (0x361200..0x361a00), so those levels are rejected with -EOPNOTSUPP
 * on that range. Everything else is delegated to the common smu_v11_0
 * handler. Returns 0 on success or a negative error code.
 */
static int arcturus_set_performance_level(struct smu_context *smu,
					  enum amd_dpm_forced_level level)
{
	uint32_t smu_version;
	int ret;

	ret = smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to get smu version!\n")printf("drm:pid%d:%s *ERROR* " "Failed to get smu version!\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__);
		return ret;
	}

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_LOW:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		/* Known-bad firmware window for clock forcing. */
		if ((smu_version >= 0x361200) &&
		    (smu_version <= 0x361a00)) {
			dev_err(smu->adev->dev, "Forcing clock level is not supported with "printf("drm:pid%d:%s *ERROR* " "Forcing clock level is not supported with " "54.18 - 54.26(included) SMU firmwares\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__)
				"54.18 - 54.26(included) SMU firmwares\n")printf("drm:pid%d:%s *ERROR* " "Forcing clock level is not supported with " "54.18 - 54.26(included) SMU firmwares\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p-> ps_pid, __func__);
			return -EOPNOTSUPP45;
		}
		break;
	default:
		break;
	}

	return smu_v11_0_set_performance_level(smu, level);
}
1590 | |
1591 | static void arcturus_dump_pptable(struct smu_context *smu) |
1592 | { |
1593 | struct smu_table_context *table_context = &smu->smu_table; |
1594 | PPTable_t *pptable = table_context->driver_pptable; |
Value stored to 'pptable' during its initialization is never read | |
1595 | int i; |
1596 | |
1597 | dev_info(smu->adev->dev, "Dumped PPTable:\n")do { } while(0); |
1598 | |
1599 | dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version)do { } while(0); |
1600 | |
1601 | dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0])do { } while(0); |
1602 | dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1])do { } while(0); |
1603 | |
1604 | for (i = 0; i < PPT_THROTTLER_COUNT; i++) { |
1605 | dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = %d\n", i, pptable->SocketPowerLimitAc[i])do { } while(0); |
1606 | dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = %d\n", i, pptable->SocketPowerLimitAcTau[i])do { } while(0); |
1607 | } |
1608 | |
1609 | dev_info(smu->adev->dev, "TdcLimitSoc = %d\n", pptable->TdcLimitSoc)do { } while(0); |
1610 | dev_info(smu->adev->dev, "TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau)do { } while(0); |
1611 | dev_info(smu->adev->dev, "TdcLimitGfx = %d\n", pptable->TdcLimitGfx)do { } while(0); |
1612 | dev_info(smu->adev->dev, "TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau)do { } while(0); |
1613 | |
1614 | dev_info(smu->adev->dev, "TedgeLimit = %d\n", pptable->TedgeLimit)do { } while(0); |
1615 | dev_info(smu->adev->dev, "ThotspotLimit = %d\n", pptable->ThotspotLimit)do { } while(0); |
1616 | dev_info(smu->adev->dev, "TmemLimit = %d\n", pptable->TmemLimit)do { } while(0); |
1617 | dev_info(smu->adev->dev, "Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit)do { } while(0); |
1618 | dev_info(smu->adev->dev, "Tvr_memLimit = %d\n", pptable->Tvr_memLimit)do { } while(0); |
1619 | dev_info(smu->adev->dev, "Tvr_socLimit = %d\n", pptable->Tvr_socLimit)do { } while(0); |
1620 | dev_info(smu->adev->dev, "FitLimit = %d\n", pptable->FitLimit)do { } while(0); |
1621 | |
1622 | dev_info(smu->adev->dev, "PpmPowerLimit = %d\n", pptable->PpmPowerLimit)do { } while(0); |
1623 | dev_info(smu->adev->dev, "PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold)do { } while(0); |
1624 | |
1625 | dev_info(smu->adev->dev, "ThrottlerControlMask = %d\n", pptable->ThrottlerControlMask)do { } while(0); |
1626 | |
1627 | dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx)do { } while(0); |
1628 | dev_info(smu->adev->dev, "UlvPadding = 0x%08x\n", pptable->UlvPadding)do { } while(0); |
1629 | |
1630 | dev_info(smu->adev->dev, "UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass)do { } while(0); |
1631 | dev_info(smu->adev->dev, "Padding234[0] = 0x%02x\n", pptable->Padding234[0])do { } while(0); |
1632 | dev_info(smu->adev->dev, "Padding234[1] = 0x%02x\n", pptable->Padding234[1])do { } while(0); |
1633 | dev_info(smu->adev->dev, "Padding234[2] = 0x%02x\n", pptable->Padding234[2])do { } while(0); |
1634 | |
1635 | dev_info(smu->adev->dev, "MinVoltageGfx = %d\n", pptable->MinVoltageGfx)do { } while(0); |
1636 | dev_info(smu->adev->dev, "MinVoltageSoc = %d\n", pptable->MinVoltageSoc)do { } while(0); |
1637 | dev_info(smu->adev->dev, "MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx)do { } while(0); |
1638 | dev_info(smu->adev->dev, "MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc)do { } while(0); |
1639 | |
1640 | dev_info(smu->adev->dev, "LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx)do { } while(0); |
1641 | dev_info(smu->adev->dev, "LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc)do { } while(0); |
1642 | |
1643 | dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n"do { } while(0) |
1644 | " .VoltageMode = 0x%02x\n"do { } while(0) |
1645 | " .SnapToDiscrete = 0x%02x\n"do { } while(0) |
1646 | " .NumDiscreteLevels = 0x%02x\n"do { } while(0) |
1647 | " .padding = 0x%02x\n"do { } while(0) |
1648 | " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"do { } while(0) |
1649 | " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"do { } while(0) |
1650 | " .SsFmin = 0x%04x\n"do { } while(0) |
1651 | " .Padding_16 = 0x%04x\n",do { } while(0) |
1652 | pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,do { } while(0) |
1653 | pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,do { } while(0) |
1654 | pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,do { } while(0) |
1655 | pptable->DpmDescriptor[PPCLK_GFXCLK].padding,do { } while(0) |
1656 | pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,do { } while(0) |
1657 | pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,do { } while(0) |
1658 | pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,do { } while(0) |
1659 | pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,do { } while(0) |
1660 | pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c,do { } while(0) |
1661 | pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin,do { } while(0) |
1662 | pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16)do { } while(0); |
1663 | |
1664 | dev_info(smu->adev->dev, "[PPCLK_VCLK]\n"do { } while(0) |
1665 | " .VoltageMode = 0x%02x\n"do { } while(0) |
1666 | " .SnapToDiscrete = 0x%02x\n"do { } while(0) |
1667 | " .NumDiscreteLevels = 0x%02x\n"do { } while(0) |
1668 | " .padding = 0x%02x\n"do { } while(0) |
1669 | " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"do { } while(0) |
1670 | " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"do { } while(0) |
1671 | " .SsFmin = 0x%04x\n"do { } while(0) |
1672 | " .Padding_16 = 0x%04x\n",do { } while(0) |
1673 | pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode,do { } while(0) |
1674 | pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete,do { } while(0) |
1675 | pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels,do { } while(0) |
1676 | pptable->DpmDescriptor[PPCLK_VCLK].padding,do { } while(0) |
1677 | pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m,do { } while(0) |
1678 | pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b,do { } while(0) |
1679 | pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a,do { } while(0) |
1680 | pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b,do { } while(0) |
1681 | pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c,do { } while(0) |
1682 | pptable->DpmDescriptor[PPCLK_VCLK].SsFmin,do { } while(0) |
1683 | pptable->DpmDescriptor[PPCLK_VCLK].Padding16)do { } while(0); |
1684 | |
1685 | dev_info(smu->adev->dev, "[PPCLK_DCLK]\n"do { } while(0) |
1686 | " .VoltageMode = 0x%02x\n"do { } while(0) |
1687 | " .SnapToDiscrete = 0x%02x\n"do { } while(0) |
1688 | " .NumDiscreteLevels = 0x%02x\n"do { } while(0) |
1689 | " .padding = 0x%02x\n"do { } while(0) |
1690 | " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"do { } while(0) |
1691 | " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"do { } while(0) |
1692 | " .SsFmin = 0x%04x\n"do { } while(0) |
1693 | " .Padding_16 = 0x%04x\n",do { } while(0) |
1694 | pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode,do { } while(0) |
1695 | pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete,do { } while(0) |
1696 | pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels,do { } while(0) |
1697 | pptable->DpmDescriptor[PPCLK_DCLK].padding,do { } while(0) |
1698 | pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m,do { } while(0) |
1699 | pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b,do { } while(0) |
1700 | pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a,do { } while(0) |
1701 | pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b,do { } while(0) |
1702 | pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c,do { } while(0) |
1703 | pptable->DpmDescriptor[PPCLK_DCLK].SsFmin,do { } while(0) |
1704 | pptable->DpmDescriptor[PPCLK_DCLK].Padding16)do { } while(0); |
1705 | |
1706 | dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n"do { } while(0) |
1707 | " .VoltageMode = 0x%02x\n"do { } while(0) |
1708 | " .SnapToDiscrete = 0x%02x\n"do { } while(0) |
1709 | " .NumDiscreteLevels = 0x%02x\n"do { } while(0) |
1710 | " .padding = 0x%02x\n"do { } while(0) |
1711 | " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"do { } while(0) |
1712 | " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"do { } while(0) |
1713 | " .SsFmin = 0x%04x\n"do { } while(0) |
1714 | " .Padding_16 = 0x%04x\n",do { } while(0) |
1715 | pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,do { } while(0) |
1716 | pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,do { } while(0) |
1717 | pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,do { } while(0) |
1718 | pptable->DpmDescriptor[PPCLK_SOCCLK].padding,do { } while(0) |
1719 | pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,do { } while(0) |
1720 | pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,do { } while(0) |
1721 | pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,do { } while(0) |
1722 | pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,do { } while(0) |
1723 | pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c,do { } while(0) |
1724 | pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin,do { } while(0) |
1725 | pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16)do { } while(0); |
1726 | |
1727 | dev_info(smu->adev->dev, "[PPCLK_UCLK]\n"do { } while(0) |
1728 | " .VoltageMode = 0x%02x\n"do { } while(0) |
1729 | " .SnapToDiscrete = 0x%02x\n"do { } while(0) |
1730 | " .NumDiscreteLevels = 0x%02x\n"do { } while(0) |
1731 | " .padding = 0x%02x\n"do { } while(0) |
1732 | " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"do { } while(0) |
1733 | " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"do { } while(0) |
1734 | " .SsFmin = 0x%04x\n"do { } while(0) |
1735 | " .Padding_16 = 0x%04x\n",do { } while(0) |
1736 | pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,do { } while(0) |
1737 | pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,do { } while(0) |
1738 | pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,do { } while(0) |
1739 | pptable->DpmDescriptor[PPCLK_UCLK].padding,do { } while(0) |
1740 | pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,do { } while(0) |
1741 | pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,do { } while(0) |
1742 | pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,do { } while(0) |
1743 | pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,do { } while(0) |
1744 | pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c,do { } while(0) |
1745 | pptable->DpmDescriptor[PPCLK_UCLK].SsFmin,do { } while(0) |
1746 | pptable->DpmDescriptor[PPCLK_UCLK].Padding16)do { } while(0); |
1747 | |
1748 | dev_info(smu->adev->dev, "[PPCLK_FCLK]\n"do { } while(0) |
1749 | " .VoltageMode = 0x%02x\n"do { } while(0) |
1750 | " .SnapToDiscrete = 0x%02x\n"do { } while(0) |
1751 | " .NumDiscreteLevels = 0x%02x\n"do { } while(0) |
1752 | " .padding = 0x%02x\n"do { } while(0) |
1753 | " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"do { } while(0) |
1754 | " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"do { } while(0) |
1755 | " .SsFmin = 0x%04x\n"do { } while(0) |
1756 | " .Padding_16 = 0x%04x\n",do { } while(0) |
1757 | pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,do { } while(0) |
1758 | pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,do { } while(0) |
1759 | pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,do { } while(0) |
1760 | pptable->DpmDescriptor[PPCLK_FCLK].padding,do { } while(0) |
1761 | pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,do { } while(0) |
1762 | pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,do { } while(0) |
1763 | pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,do { } while(0) |
1764 | pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,do { } while(0) |
1765 | pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c,do { } while(0) |
1766 | pptable->DpmDescriptor[PPCLK_FCLK].SsFmin,do { } while(0) |
1767 | pptable->DpmDescriptor[PPCLK_FCLK].Padding16)do { } while(0); |
1768 | |
1769 | |
1770 | dev_info(smu->adev->dev, "FreqTableGfx\n")do { } while(0); |
1771 | for (i = 0; i < NUM_GFXCLK_DPM_LEVELS16; i++) |
1772 | dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableGfx[i])do { } while(0); |
1773 | |
1774 | dev_info(smu->adev->dev, "FreqTableVclk\n")do { } while(0); |
1775 | for (i = 0; i < NUM_VCLK_DPM_LEVELS8; i++) |
1776 | dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableVclk[i])do { } while(0); |
1777 | |
1778 | dev_info(smu->adev->dev, "FreqTableDclk\n")do { } while(0); |
1779 | for (i = 0; i < NUM_DCLK_DPM_LEVELS8; i++) |
1780 | dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableDclk[i])do { } while(0); |
1781 | |
1782 | dev_info(smu->adev->dev, "FreqTableSocclk\n")do { } while(0); |
1783 | for (i = 0; i < NUM_SOCCLK_DPM_LEVELS8; i++) |
1784 | dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableSocclk[i])do { } while(0); |
1785 | |
1786 | dev_info(smu->adev->dev, "FreqTableUclk\n")do { } while(0); |
1787 | for (i = 0; i < NUM_UCLK_DPM_LEVELS4; i++) |
1788 | dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableUclk[i])do { } while(0); |
1789 | |
1790 | dev_info(smu->adev->dev, "FreqTableFclk\n")do { } while(0); |
1791 | for (i = 0; i < NUM_FCLK_DPM_LEVELS8; i++) |
1792 | dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableFclk[i])do { } while(0); |
1793 | |
1794 | dev_info(smu->adev->dev, "Mp0clkFreq\n")do { } while(0); |
1795 | for (i = 0; i < NUM_MP0CLK_DPM_LEVELS2; i++) |
1796 | dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0clkFreq[i])do { } while(0); |
1797 | |
1798 | dev_info(smu->adev->dev, "Mp0DpmVoltage\n")do { } while(0); |
1799 | for (i = 0; i < NUM_MP0CLK_DPM_LEVELS2; i++) |
1800 | dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i])do { } while(0); |
1801 | |
1802 | dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle)do { } while(0); |
1803 | dev_info(smu->adev->dev, "GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate)do { } while(0); |
1804 | dev_info(smu->adev->dev, "Padding567[0] = 0x%x\n", pptable->Padding567[0])do { } while(0); |
1805 | dev_info(smu->adev->dev, "Padding567[1] = 0x%x\n", pptable->Padding567[1])do { } while(0); |
1806 | dev_info(smu->adev->dev, "Padding567[2] = 0x%x\n", pptable->Padding567[2])do { } while(0); |
1807 | dev_info(smu->adev->dev, "Padding567[3] = 0x%x\n", pptable->Padding567[3])do { } while(0); |
1808 | dev_info(smu->adev->dev, "GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq)do { } while(0); |
1809 | dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource)do { } while(0); |
1810 | dev_info(smu->adev->dev, "Padding456 = 0x%x\n", pptable->Padding456)do { } while(0); |
1811 | |
1812 | dev_info(smu->adev->dev, "EnableTdpm = %d\n", pptable->EnableTdpm)do { } while(0); |
1813 | dev_info(smu->adev->dev, "TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature)do { } while(0); |
1814 | dev_info(smu->adev->dev, "TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature)do { } while(0); |
1815 | dev_info(smu->adev->dev, "GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit)do { } while(0); |
1816 | |
1817 | dev_info(smu->adev->dev, "FanStopTemp = %d\n", pptable->FanStopTemp)do { } while(0); |
1818 | dev_info(smu->adev->dev, "FanStartTemp = %d\n", pptable->FanStartTemp)do { } while(0); |
1819 | |
1820 | dev_info(smu->adev->dev, "FanGainEdge = %d\n", pptable->FanGainEdge)do { } while(0); |
1821 | dev_info(smu->adev->dev, "FanGainHotspot = %d\n", pptable->FanGainHotspot)do { } while(0); |
1822 | dev_info(smu->adev->dev, "FanGainVrGfx = %d\n", pptable->FanGainVrGfx)do { } while(0); |
1823 | dev_info(smu->adev->dev, "FanGainVrSoc = %d\n", pptable->FanGainVrSoc)do { } while(0); |
1824 | dev_info(smu->adev->dev, "FanGainVrMem = %d\n", pptable->FanGainVrMem)do { } while(0); |
1825 | dev_info(smu->adev->dev, "FanGainHbm = %d\n", pptable->FanGainHbm)do { } while(0); |
1826 | |
1827 | dev_info(smu->adev->dev, "FanPwmMin = %d\n", pptable->FanPwmMin)do { } while(0); |
1828 | dev_info(smu->adev->dev, "FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm)do { } while(0); |
1829 | dev_info(smu->adev->dev, "FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm)do { } while(0); |
1830 | dev_info(smu->adev->dev, "FanMaximumRpm = %d\n", pptable->FanMaximumRpm)do { } while(0); |
1831 | dev_info(smu->adev->dev, "FanTargetTemperature = %d\n", pptable->FanTargetTemperature)do { } while(0); |
1832 | dev_info(smu->adev->dev, "FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk)do { } while(0); |
1833 | dev_info(smu->adev->dev, "FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable)do { } while(0); |
1834 | dev_info(smu->adev->dev, "FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev)do { } while(0); |
1835 | dev_info(smu->adev->dev, "FanTempInputSelect = %d\n", pptable->FanTempInputSelect)do { } while(0); |
1836 | |
1837 | dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta)do { } while(0); |
1838 | dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta)do { } while(0); |
1839 | dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta)do { } while(0); |
1840 | dev_info(smu->adev->dev, "FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved)do { } while(0); |
1841 | |
1842 | dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX])do { } while(0); |
1843 | dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC])do { } while(0); |
1844 | dev_info(smu->adev->dev, "Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0])do { } while(0); |
1845 | dev_info(smu->adev->dev, "Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1])do { } while(0); |
1846 | |
1847 | dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n",do { } while(0) |
1848 | pptable->dBtcGbGfxPll.a,do { } while(0) |
1849 | pptable->dBtcGbGfxPll.b,do { } while(0) |
1850 | pptable->dBtcGbGfxPll.c)do { } while(0); |
1851 | dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",do { } while(0) |
1852 | pptable->dBtcGbGfxAfll.a,do { } while(0) |
1853 | pptable->dBtcGbGfxAfll.b,do { } while(0) |
1854 | pptable->dBtcGbGfxAfll.c)do { } while(0); |
1855 | dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",do { } while(0) |
1856 | pptable->dBtcGbSoc.a,do { } while(0) |
1857 | pptable->dBtcGbSoc.b,do { } while(0) |
1858 | pptable->dBtcGbSoc.c)do { } while(0); |
1859 | |
1860 | dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",do { } while(0) |
1861 | pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,do { } while(0) |
1862 | pptable->qAgingGb[AVFS_VOLTAGE_GFX].b)do { } while(0); |
1863 | dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",do { } while(0) |
1864 | pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,do { } while(0) |
1865 | pptable->qAgingGb[AVFS_VOLTAGE_SOC].b)do { } while(0); |
1866 | |
1867 | dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",do { } while(0) |
1868 | pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,do { } while(0) |
1869 | pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,do { } while(0) |
1870 | pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c)do { } while(0); |
1871 | dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",do { } while(0) |
1872 | pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,do { } while(0) |
1873 | pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,do { } while(0) |
1874 | pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c)do { } while(0); |
1875 | |
1876 | dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX])do { } while(0); |
1877 | dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC])do { } while(0); |
1878 | |
1879 | dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX])do { } while(0); |
1880 | dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC])do { } while(0); |
1881 | dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0])do { } while(0); |
1882 | dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1])do { } while(0); |
1883 | |
1884 | dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX])do { } while(0); |
1885 | dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC])do { } while(0); |
1886 | dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX])do { } while(0); |
1887 | dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC])do { } while(0); |
1888 | |
1889 | dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX])do { } while(0); |
1890 | dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC])do { } while(0); |
1891 | |
1892 | dev_info(smu->adev->dev, "XgmiDpmPstates\n")do { } while(0); |
1893 | for (i = 0; i < NUM_XGMI_LEVELS2; i++) |
1894 | dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiDpmPstates[i])do { } while(0); |
1895 | dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0])do { } while(0); |
1896 | dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1])do { } while(0); |
1897 | |
1898 | dev_info(smu->adev->dev, "VDDGFX_TVmin = %d\n", pptable->VDDGFX_TVmin)do { } while(0); |
1899 | dev_info(smu->adev->dev, "VDDSOC_TVmin = %d\n", pptable->VDDSOC_TVmin)do { } while(0); |
1900 | dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = %d\n", pptable->VDDGFX_Vmin_HiTemp)do { } while(0); |
1901 | dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = %d\n", pptable->VDDGFX_Vmin_LoTemp)do { } while(0); |
1902 | dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = %d\n", pptable->VDDSOC_Vmin_HiTemp)do { } while(0); |
1903 | dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = %d\n", pptable->VDDSOC_Vmin_LoTemp)do { } while(0); |
1904 | dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = %d\n", pptable->VDDGFX_TVminHystersis)do { } while(0); |
1905 | dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = %d\n", pptable->VDDSOC_TVminHystersis)do { } while(0); |
1906 | |
1907 | dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides)do { } while(0); |
1908 | dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",do { } while(0) |
1909 | pptable->ReservedEquation0.a,do { } while(0) |
1910 | pptable->ReservedEquation0.b,do { } while(0) |
1911 | pptable->ReservedEquation0.c)do { } while(0); |
1912 | dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",do { } while(0) |
1913 | pptable->ReservedEquation1.a,do { } while(0) |
1914 | pptable->ReservedEquation1.b,do { } while(0) |
1915 | pptable->ReservedEquation1.c)do { } while(0); |
1916 | dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",do { } while(0) |
1917 | pptable->ReservedEquation2.a,do { } while(0) |
1918 | pptable->ReservedEquation2.b,do { } while(0) |
1919 | pptable->ReservedEquation2.c)do { } while(0); |
1920 | dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",do { } while(0) |
1921 | pptable->ReservedEquation3.a,do { } while(0) |
1922 | pptable->ReservedEquation3.b,do { } while(0) |
1923 | pptable->ReservedEquation3.c)do { } while(0); |
1924 | |
1925 | dev_info(smu->adev->dev, "MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx)do { } while(0); |
1926 | dev_info(smu->adev->dev, "PaddingUlv = %d\n", pptable->PaddingUlv)do { } while(0); |
1927 | |
1928 | dev_info(smu->adev->dev, "TotalPowerConfig = %d\n", pptable->TotalPowerConfig)do { } while(0); |
1929 | dev_info(smu->adev->dev, "TotalPowerSpare1 = %d\n", pptable->TotalPowerSpare1)do { } while(0); |
1930 | dev_info(smu->adev->dev, "TotalPowerSpare2 = %d\n", pptable->TotalPowerSpare2)do { } while(0); |
1931 | |
1932 | dev_info(smu->adev->dev, "PccThresholdLow = %d\n", pptable->PccThresholdLow)do { } while(0); |
1933 | dev_info(smu->adev->dev, "PccThresholdHigh = %d\n", pptable->PccThresholdHigh)do { } while(0); |
1934 | |
1935 | dev_info(smu->adev->dev, "Board Parameters:\n")do { } while(0); |
1936 | dev_info(smu->adev->dev, "MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx)do { } while(0); |
1937 | dev_info(smu->adev->dev, "MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc)do { } while(0); |
1938 | |
1939 | dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping)do { } while(0); |
1940 | dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping)do { } while(0); |
1941 | dev_info(smu->adev->dev, "VddMemVrMapping = 0x%x\n", pptable->VddMemVrMapping)do { } while(0); |
1942 | dev_info(smu->adev->dev, "BoardVrMapping = 0x%x\n", pptable->BoardVrMapping)do { } while(0); |
1943 | |
1944 | dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask)do { } while(0); |
1945 | dev_info(smu->adev->dev, "ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent)do { } while(0); |
1946 | |
1947 | dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent)do { } while(0); |
1948 | dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset)do { } while(0); |
1949 | dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx)do { } while(0); |
1950 | |
1951 | dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent)do { } while(0); |
1952 | dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset)do { } while(0); |
1953 | dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc)do { } while(0); |
1954 | |
1955 | dev_info(smu->adev->dev, "MemMaxCurrent = 0x%x\n", pptable->MemMaxCurrent)do { } while(0); |
1956 | dev_info(smu->adev->dev, "MemOffset = 0x%x\n", pptable->MemOffset)do { } while(0); |
1957 | dev_info(smu->adev->dev, "Padding_TelemetryMem = 0x%x\n", pptable->Padding_TelemetryMem)do { } while(0); |
1958 | |
1959 | dev_info(smu->adev->dev, "BoardMaxCurrent = 0x%x\n", pptable->BoardMaxCurrent)do { } while(0); |
1960 | dev_info(smu->adev->dev, "BoardOffset = 0x%x\n", pptable->BoardOffset)do { } while(0); |
1961 | dev_info(smu->adev->dev, "Padding_TelemetryBoardInput = 0x%x\n", pptable->Padding_TelemetryBoardInput)do { } while(0); |
1962 | |
1963 | dev_info(smu->adev->dev, "VR0HotGpio = %d\n", pptable->VR0HotGpio)do { } while(0); |
1964 | dev_info(smu->adev->dev, "VR0HotPolarity = %d\n", pptable->VR0HotPolarity)do { } while(0); |
1965 | dev_info(smu->adev->dev, "VR1HotGpio = %d\n", pptable->VR1HotGpio)do { } while(0); |
1966 | dev_info(smu->adev->dev, "VR1HotPolarity = %d\n", pptable->VR1HotPolarity)do { } while(0); |
1967 | |
1968 | dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled)do { } while(0); |
1969 | dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent)do { } while(0); |
1970 | dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq)do { } while(0); |
1971 | |
1972 | dev_info(smu->adev->dev, "UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled)do { } while(0); |
1973 | dev_info(smu->adev->dev, "UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent)do { } while(0); |
1974 | dev_info(smu->adev->dev, "UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq)do { } while(0); |
1975 | |
1976 | dev_info(smu->adev->dev, "FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled)do { } while(0); |
1977 | dev_info(smu->adev->dev, "FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent)do { } while(0); |
1978 | dev_info(smu->adev->dev, "FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq)do { } while(0); |
1979 | |
1980 | dev_info(smu->adev->dev, "FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled)do { } while(0); |
1981 | dev_info(smu->adev->dev, "FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent)do { } while(0); |
1982 | dev_info(smu->adev->dev, "FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq)do { } while(0); |
1983 | |
1984 | for (i = 0; i < NUM_I2C_CONTROLLERS8; i++) { |
1985 | dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i)do { } while(0); |
1986 | dev_info(smu->adev->dev, " .Enabled = %d\n",do { } while(0) |
1987 | pptable->I2cControllers[i].Enabled)do { } while(0); |
1988 | dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n",do { } while(0) |
1989 | pptable->I2cControllers[i].SlaveAddress)do { } while(0); |
1990 | dev_info(smu->adev->dev, " .ControllerPort = %d\n",do { } while(0) |
1991 | pptable->I2cControllers[i].ControllerPort)do { } while(0); |
1992 | dev_info(smu->adev->dev, " .ControllerName = %d\n",do { } while(0) |
1993 | pptable->I2cControllers[i].ControllerName)do { } while(0); |
1994 | dev_info(smu->adev->dev, " .ThermalThrottler = %d\n",do { } while(0) |
1995 | pptable->I2cControllers[i].ThermalThrotter)do { } while(0); |
1996 | dev_info(smu->adev->dev, " .I2cProtocol = %d\n",do { } while(0) |
1997 | pptable->I2cControllers[i].I2cProtocol)do { } while(0); |
1998 | dev_info(smu->adev->dev, " .Speed = %d\n",do { } while(0) |
1999 | pptable->I2cControllers[i].Speed)do { } while(0); |
2000 | } |
2001 | |
2002 | dev_info(smu->adev->dev, "MemoryChannelEnabled = %d\n", pptable->MemoryChannelEnabled)do { } while(0); |
2003 | dev_info(smu->adev->dev, "DramBitWidth = %d\n", pptable->DramBitWidth)do { } while(0); |
2004 | |
2005 | dev_info(smu->adev->dev, "TotalBoardPower = %d\n", pptable->TotalBoardPower)do { } while(0); |
2006 | |
2007 | dev_info(smu->adev->dev, "XgmiLinkSpeed\n")do { } while(0); |
2008 | for (i = 0; i < NUM_XGMI_PSTATE_LEVELS4; i++) |
2009 | dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i])do { } while(0); |
2010 | dev_info(smu->adev->dev, "XgmiLinkWidth\n")do { } while(0); |
2011 | for (i = 0; i < NUM_XGMI_PSTATE_LEVELS4; i++) |
2012 | dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkWidth[i])do { } while(0); |
2013 | dev_info(smu->adev->dev, "XgmiFclkFreq\n")do { } while(0); |
2014 | for (i = 0; i < NUM_XGMI_PSTATE_LEVELS4; i++) |
2015 | dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiFclkFreq[i])do { } while(0); |
2016 | dev_info(smu->adev->dev, "XgmiSocVoltage\n")do { } while(0); |
2017 | for (i = 0; i < NUM_XGMI_PSTATE_LEVELS4; i++) |
2018 | dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiSocVoltage[i])do { } while(0); |
2019 | |
2020 | } |
2021 | |
/*
 * arcturus_is_dpm_running - report whether any core DPM feature is active.
 *
 * Reads the SMU's enabled-feature mask and tests it against the
 * SMC_DPM_FEATURE bit set (bits 0-6, expanded inline below).
 * Returns false if the mask cannot be read.
 */
2022 | static bool_Bool arcturus_is_dpm_running(struct smu_context *smu) |
2023 | { |
2024 | int ret = 0; |
2025 | uint64_t feature_enabled; |
2026 | |
2027 | ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); |
2028 | if (ret) |
/* Treat a query failure as "DPM not running" rather than propagating the error. */
2029 | return false0; |
2030 | |
/* Non-zero if at least one of the DPM feature bits is set. */
2031 | return !!(feature_enabled & SMC_DPM_FEATURE( (1 << 0 ) | (1 << 1 ) | (1 << 2 ) | (1 << 3 ) | (1 << 5 ) | (1 << 4 ) | (1 << 6 ))); |
2032 | } |
2033 | |
/*
 * arcturus_dpm_set_vcn_enable - enable or disable the VCN DPM feature.
 *
 * Only toggles SMU_FEATURE_VCN_DPM_BIT when its current state differs from
 * the requested one, avoiding a redundant SMU message.
 * Returns 0 on success (or no-op), negative error code on failure.
 */
2034 | static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool_Bool enable) |
2035 | { |
2036 | int ret = 0; |
2037 | |
2038 | if (enable) { |
/* Skip the SMU call if VCN DPM is already enabled. */
2039 | if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT)) { |
2040 | ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_DPM_BIT, 1); |
2041 | if (ret) { |
2042 | dev_err(smu->adev->dev, "[EnableVCNDPM] failed!\n")printf("drm:pid%d:%s *ERROR* " "[EnableVCNDPM] failed!\n", ({ struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
2043 | return ret; |
2044 | } |
2045 | } |
2046 | } else { |
/* Symmetric path: only disable if currently enabled. */
2047 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_DPM_BIT)) { |
2048 | ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_DPM_BIT, 0); |
2049 | if (ret) { |
2050 | dev_err(smu->adev->dev, "[DisableVCNDPM] failed!\n")printf("drm:pid%d:%s *ERROR* " "[DisableVCNDPM] failed!\n", ( {struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
2051 | return ret; |
2052 | } |
2053 | } |
2054 | } |
2055 | |
2056 | return ret; |
2057 | } |
2058 | |
/*
 * arcturus_i2c_xfer - i2c_algorithm master_xfer hook routed through the SMU.
 *
 * Packs the i2c_msg array into a SwI2cRequest_t command list, sends it to the
 * firmware via the SMU_TABLE_I2C_COMMANDS driver table, then copies read data
 * back out of the response located at the driver table's cpu_addr.
 *
 * NOTE(review): assumes the total byte count across all messages fits in the
 * SwI2cCmds array — presumably guaranteed by arcturus_i2c_control_quirks
 * (max read/write lengths of MAX_SW_I2C_COMMANDS); confirm against callers.
 *
 * Returns num_msgs on success, negative error code on failure.
 */
2059 | static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap, |
2060 | struct i2c_msg *msg, int num_msgs) |
2061 | { |
2062 | struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); |
2063 | struct amdgpu_device *adev = smu_i2c->adev; |
2064 | struct smu_context *smu = adev->powerplay.pp_handle; |
2065 | struct smu_table_context *smu_table = &smu->smu_table; |
2066 | struct smu_table *table = &smu_table->driver_table; |
/* req is the outbound request; res aliases the shared table where firmware writes the reply. */
2067 | SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; |
2068 | int i, j, r, c; |
2069 | u16 dir; |
2070 | |
/* Firmware cannot service I2C while DPM is down. */
2071 | if (!adev->pm.dpm_enabled) |
2072 | return -EBUSY16; |
2073 | |
2074 | req = kzalloc(sizeof(*req), GFP_KERNEL(0x0001 | 0x0004)); |
2075 | if (!req) |
2076 | return -ENOMEM12; |
2077 | |
2078 | req->I2CcontrollerPort = smu_i2c->port; |
2079 | req->I2CSpeed = I2C_SPEED_FAST_400K; |
2080 | req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ |
/* Track the current transfer direction to detect read/write transitions. */
2081 | dir = msg[0].flags & I2C_M_RD0x0001; |
2082 | |
/* Flatten every byte of every message into one firmware command per byte; c is the flat index. */
2083 | for (c = i = 0; i < num_msgs; i++) { |
2084 | for (j = 0; j < msg[i].len; j++, c++) { |
2085 | SwI2cCmd_t *cmd = &req->SwI2cCmds[c]; |
2086 | |
2087 | if (!(msg[i].flags & I2C_M_RD0x0001)) { |
2088 | /* write */ |
2089 | cmd->Cmd = I2C_CMD_WRITE; |
2090 | cmd->RegisterAddr = msg[i].buf[j]; |
2091 | } |
2092 | |
/* A direction flip requires a repeated-start condition on the bus. */
2093 | if ((dir ^ msg[i].flags) & I2C_M_RD0x0001) { |
2094 | /* The direction changes. |
2095 | */ |
2096 | dir = msg[i].flags & I2C_M_RD0x0001; |
2097 | cmd->CmdConfig |= CMDCONFIG_RESTART_MASK(1 << 1); |
2098 | } |
2099 | |
2100 | req->NumCmds++; |
2101 | |
2102 | /* |
2103 | * Insert STOP if we are at the last byte of either last |
2104 | * message for the transaction or the client explicitly |
2105 | * requires a STOP at this particular message. |
2106 | */ |
2107 | if ((j == msg[i].len - 1) && |
2108 | ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP0x0004))) { |
2109 | cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK(1 << 1); |
2110 | cmd->CmdConfig |= CMDCONFIG_STOP_MASK(1 << 0); |
2111 | } |
2112 | } |
2113 | } |
/* Serialize against other PM users of the driver table while the firmware executes the request. */
2114 | mutex_lock(&adev->pm.mutex)rw_enter_write(&adev->pm.mutex); |
2115 | r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true1); |
2116 | mutex_unlock(&adev->pm.mutex)rw_exit_write(&adev->pm.mutex); |
2117 | if (r) |
2118 | goto fail; |
2119 | |
/* Copy data returned by the firmware back into the caller's read buffers, skipping write messages. */
2120 | for (c = i = 0; i < num_msgs; i++) { |
2121 | if (!(msg[i].flags & I2C_M_RD0x0001)) { |
2122 | c += msg[i].len; |
2123 | continue; |
2124 | } |
2125 | for (j = 0; j < msg[i].len; j++, c++) { |
2126 | SwI2cCmd_t *cmd = &res->SwI2cCmds[c]; |
2127 | |
2128 | msg[i].buf[j] = cmd->Data; |
2129 | } |
2130 | } |
/* Success per i2c_algorithm convention: return the number of messages transferred. */
2131 | r = num_msgs; |
2132 | fail: |
2133 | kfree(req); |
2134 | return r; |
2135 | } |
2136 | |
/*
 * arcturus_i2c_func - i2c_algorithm functionality hook.
 * Advertises plain I2C plus emulated SMBus support for the SMU-backed bus.
 */
2137 | static u32 arcturus_i2c_func(struct i2c_adapter *adap) |
2138 | { |
2139 | return I2C_FUNC_I2C0 | I2C_FUNC_SMBUS_EMUL0; |
2140 | } |
2141 | |
2142 | |
/* I2C algorithm ops for the SMU-backed bus: transfers go through the firmware. */
2143 | static const struct i2c_algorithm arcturus_i2c_algo = { |
2144 | .master_xfer = arcturus_i2c_xfer, |
2145 | .functionality = arcturus_i2c_func, |
2146 | }; |
2147 | |
2148 | |
/*
 * Adapter limits matching the firmware's SwI2cRequest_t: at most
 * MAX_SW_I2C_COMMANDS bytes per transfer, with combined write-then-read
 * transactions capped so both halves fit in one command list.
 */
2149 | static const struct i2c_adapter_quirks arcturus_i2c_control_quirks = { |
2150 | .flags = I2C_AQ_COMB0 | I2C_AQ_COMB_SAME_ADDR0 | I2C_AQ_NO_ZERO_LEN0, |
2151 | .max_read_len = MAX_SW_I2C_COMMANDS8, |
2152 | .max_write_len = MAX_SW_I2C_COMMANDS8, |
2153 | .max_comb_1st_msg_len = 2, |
2154 | .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS8 - 2, |
2155 | }; |
2156 | |
/*
 * arcturus_i2c_control_init - register the SMU-backed I2C adapters.
 *
 * Initializes and registers MAX_SMU_I2C_BUSES adapters, then publishes
 * bus 0 as the RAS EEPROM bus and bus 1 as the FRU EEPROM bus.
 * On failure, unregisters the adapters added so far.
 * Returns 0 on success, negative error code otherwise.
 */
2157 | static int arcturus_i2c_control_init(struct smu_context *smu) |
2158 | { |
2159 | struct amdgpu_device *adev = smu->adev; |
2160 | int res, i; |
2161 | |
2162 | for (i = 0; i < MAX_SMU_I2C_BUSES2; i++) { |
2163 | struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; |
2164 | struct i2c_adapter *control = &smu_i2c->adapter; |
2165 | |
2166 | smu_i2c->adev = adev; |
2167 | smu_i2c->port = i; |
2168 | rw_init(&smu_i2c->mutex, "arsmuiic")_rw_init_flags(&smu_i2c->mutex, "arsmuiic", 0, ((void * )0)); |
/* Linux-only adapter fields; the OpenBSD port skips them. */
2169 | #ifdef __linux__ |
2170 | control->owner = THIS_MODULE((void *)0); |
2171 | control->class = I2C_CLASS_HWMON; |
2172 | control->dev.parent = &adev->pdev->dev; |
2173 | #endif |
2174 | control->algo = &arcturus_i2c_algo; |
2175 | control->quirks = &arcturus_i2c_control_quirks; |
2176 | snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); |
2177 | i2c_set_adapdata(control, smu_i2c); |
2178 | |
2179 | res = i2c_add_adapter(control); |
2180 | if (res) { |
2181 | DRM_ERROR("Failed to register hw i2c, err: %d\n", res)__drm_err("Failed to register hw i2c, err: %d\n", res); |
2182 | goto Out_err; |
2183 | } |
2184 | } |
2185 | |
/* Bus 0 serves the RAS EEPROM, bus 1 the FRU EEPROM. */
2186 | adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; |
2187 | adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; |
2188 | |
2189 | return 0; |
2190 | Out_err: |
/* NOTE(review): this unwind starts at the failing index i, so it also calls
 * i2c_del_adapter() on the adapter whose i2c_add_adapter() just failed —
 * presumably safe for this i2c implementation; verify. */
2191 | for ( ; i >= 0; i--) { |
2192 | struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; |
2193 | struct i2c_adapter *control = &smu_i2c->adapter; |
2194 | |
2195 | i2c_del_adapter(control); |
2196 | } |
2197 | return res; |
2198 | } |
2199 | |
/*
 * arcturus_i2c_control_fini - tear down the SMU-backed I2C adapters.
 * Unregisters every adapter registered by arcturus_i2c_control_init()
 * and clears the published RAS/FRU EEPROM bus pointers.
 */
2200 | static void arcturus_i2c_control_fini(struct smu_context *smu) |
2201 | { |
2202 | struct amdgpu_device *adev = smu->adev; |
2203 | int i; |
2204 | |
2205 | for (i = 0; i < MAX_SMU_I2C_BUSES2; i++) { |
2206 | struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; |
2207 | struct i2c_adapter *control = &smu_i2c->adapter; |
2208 | |
2209 | i2c_del_adapter(control); |
2210 | } |
/* Prevent stale use of the now-unregistered adapters. */
2211 | adev->pm.ras_eeprom_i2c_bus = NULL((void *)0); |
2212 | adev->pm.fru_eeprom_i2c_bus = NULL((void *)0); |
2213 | } |
2214 | |
/*
 * arcturus_get_unique_id - derive the device unique ID / serial number.
 *
 * Reads the serial number halves from the SMC via the ReadSerialNum*
 * messages, stores the combined 64-bit value in adev->unique_id, and
 * mirrors it as a hex string in adev->serial. Silently returns (with a
 * warning) if the PMFW version cannot be read or is older than 54.23.0,
 * which introduced the ReadSerial messages.
 */
2215 | static void arcturus_get_unique_id(struct smu_context *smu) |
2216 | { |
2217 | struct amdgpu_device *adev = smu->adev; |
2218 | uint32_t top32 = 0, bottom32 = 0, smu_version; |
2219 | uint64_t id; |
2220 | |
2221 | if (smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version)) { |
2222 | dev_warn(adev->dev, "Failed to get smu version, cannot get unique_id or serial_number\n")printf("drm:pid%d:%s *WARNING* " "Failed to get smu version, cannot get unique_id or serial_number\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
2223 | return; |
2224 | } |
2225 | |
2226 | /* PPSMC_MSG_ReadSerial* is supported by 54.23.0 and onwards */ |
2227 | if (smu_version < 0x361700) { |
2228 | dev_warn(adev->dev, "ReadSerial is only supported by PMFW 54.23.0 and onwards\n")printf("drm:pid%d:%s *WARNING* " "ReadSerial is only supported by PMFW 54.23.0 and onwards\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
2229 | return; |
2230 | } |
2231 | |
2232 | /* Get the SN to turn into a Unique ID */ |
2233 | smu_cmn_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32); |
2234 | smu_cmn_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32); |
2235 | |
/* NOTE(review): bottom32 lands in the high word and top32 in the low word —
 * counter-intuitive naming, but this matches the code as written; confirm
 * against the firmware interface before "fixing". */
2236 | id = ((uint64_t)bottom32 << 32) | top32; |
2237 | adev->unique_id = id; |
2238 | /* For Arcturus-and-later, unique_id == serial_number, so convert it to a |
2239 | * 16-digit HEX string for convenience and backwards-compatibility |
2240 | */ |
2241 | snprintf(adev->serial, sizeof(adev->serial), "%llx", id); |
2242 | } |
2243 | |
/*
 * arcturus_set_df_cstate - allow or disallow data-fabric C-states.
 *
 * @smu:   SMU context
 * @state: requested pp_df_cstate (forwarded verbatim as the message arg)
 *
 * Returns 0 when skipped (during GPU reset or suspend, where Arcturus
 * does not need the cstate-disablement prerequisite), a negative errno
 * when the SMU version cannot be read or the PMFW is too old
 * (< 54.15.0), otherwise the result of the DFCstateControl message.
 */
2244 | static int arcturus_set_df_cstate(struct smu_context *smu,
2245 | enum pp_df_cstate state)
2246 | {
2247 | struct amdgpu_device *adev = smu->adev;
2248 | uint32_t smu_version;
2249 | int ret;
2250 | 
2251 | /*
2252 | * Arcturus does not need the cstate disablement
2253 | * prerequisite for gpu reset.
2254 | */
2255 | if (amdgpu_in_reset(adev) || adev->in_suspend)
2256 | return 0;
2257 | 
2258 | ret = smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
2259 | if (ret) {
2260 | dev_err(smu->adev->dev, "Failed to get smu version!\n")printf("drm:pid%d:%s *ERROR* " "Failed to get smu version!\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__);
2261 | return ret;
2262 | }
2263 | 
2264 | /* PPSMC_MSG_DFCstateControl is supported by 54.15.0 and onwards */
2265 | if (smu_version < 0x360F00) {
2266 | dev_err(smu->adev->dev, "DFCstateControl is only supported by PMFW 54.15.0 and onwards\n")printf("drm:pid%d:%s *ERROR* " "DFCstateControl is only supported by PMFW 54.15.0 and onwards\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__);
2267 | return -EINVAL22;
2268 | }
2269 | 
2270 | return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL((void *)0));
2271 | }
2272 | |
/*
 * arcturus_allow_xgmi_power_down - enable/disable XGMI (GMI) link
 * power-down via the GmiPwrDnControl PMFW message.
 *
 * @smu: SMU context
 * @en:  true to allow power-down (message arg 1), false to forbid (0)
 *
 * Returns a negative errno when the SMU version cannot be read or the
 * PMFW is older than 54.23.0 (which lacks PPSMC_MSG_GmiPwrDnControl),
 * otherwise the result of sending the message.
 */
2273 | static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool_Bool en)
2274 | {
2275 | uint32_t smu_version;
2276 | int ret;
2277 | 
2278 | ret = smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
2279 | if (ret) {
2280 | dev_err(smu->adev->dev, "Failed to get smu version!\n")printf("drm:pid%d:%s *ERROR* " "Failed to get smu version!\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__);
2281 | return ret;
2282 | }
2283 | 
2284 | /* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */
2285 | if (smu_version < 0x00361700) {
2286 | dev_err(smu->adev->dev, "XGMI power down control is only supported by PMFW 54.23.0 and onwards\n")printf("drm:pid%d:%s *ERROR* " "XGMI power down control is only supported by PMFW 54.23.0 and onwards\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__);
2287 | return -EINVAL22;
2288 | }
2289 | 
/* Both branches send the same message; only the parameter (1/0)
 * differs. */
2290 | if (en)
2291 | return smu_cmn_send_smc_msg_with_param(smu,
2292 | SMU_MSG_GmiPwrDnControl,
2293 | 1,
2294 | NULL((void *)0));
2295 | 
2296 | return smu_cmn_send_smc_msg_with_param(smu,
2297 | SMU_MSG_GmiPwrDnControl,
2298 | 0,
2299 | NULL((void *)0));
2300 | }
2301 | |
/*
 * Mapping from throttler-status bits (as reported in the SMU metrics
 * THROTTLER_* bit positions) to human-readable labels, used by
 * arcturus_log_thermal_throttling_event to build its warning message.
 */
2302 | static const struct throttling_logging_label {
2303 | uint32_t feature_mask;
2304 | const char *label;
2305 | } logging_label[] = {
2306 | {(1U << THROTTLER_TEMP_HOTSPOT_BIT2), "GPU"},
2307 | {(1U << THROTTLER_TEMP_MEM_BIT3), "HBM"},
2308 | {(1U << THROTTLER_TEMP_VR_GFX_BIT4), "VR of GFX rail"},
2309 | {(1U << THROTTLER_TEMP_VR_MEM_BIT5), "VR of HBM rail"},
2310 | {(1U << THROTTLER_TEMP_VR_SOC_BIT6), "VR of SOC rail"},
2311 | {(1U << THROTTLER_VRHOT0_BIT16), "VR0 HOT"},
2312 | {(1U << THROTTLER_VRHOT1_BIT17), "VR1 HOT"},
2313 | };
/*
 * arcturus_log_thermal_throttling_event - log which throttlers are
 * currently active.
 *
 * Reads the throttler status from the SMU metrics table, assembles an
 * " and "-separated list of active throttler labels (see logging_label)
 * into a fixed 256-byte buffer, emits a single warning, and forwards
 * the driver-independent throttler status to the KFD SMI event
 * interface.  Returns silently if the metrics read fails.
 */
2314 | static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
2315 | {
2316 | int ret;
2317 | int throttler_idx, throtting_events = 0, buf_idx = 0;
2318 | struct amdgpu_device *adev = smu->adev;
2319 | uint32_t throttler_status;
2320 | char log_buf[256];
2321 | 
2322 | ret = arcturus_get_smu_metrics_data(smu,
2323 | METRICS_THROTTLER_STATUS,
2324 | &throttler_status);
2325 | if (ret)
2326 | return;
2327 | 
2328 | memset(log_buf, 0, sizeof(log_buf))__builtin_memset((log_buf), (0), (sizeof(log_buf)));
2329 | for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label)(sizeof((logging_label)) / sizeof((logging_label)[0]));
2330 | throttler_idx++) {
2331 | if (throttler_status & logging_label[throttler_idx].feature_mask) {
2332 | throtting_events++;
2333 | buf_idx += snprintf(log_buf + buf_idx,
2334 | sizeof(log_buf) - buf_idx,
2335 | "%s%s",
2336 | throtting_events > 1 ? " and " : "",
2337 | logging_label[throttler_idx].label);
/* snprintf returns the would-have-been length, so buf_idx can exceed
 * the buffer on truncation; bail out and force NUL termination. */
2338 | if (buf_idx >= sizeof(log_buf)) {
2339 | dev_err(adev->dev, "buffer overflow!\n")printf("drm:pid%d:%s *ERROR* " "buffer overflow!\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci ) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci; })->ci_curproc->p_p->ps_pid, __func__);
2340 | log_buf[sizeof(log_buf) - 1] = '\0';
2341 | break;
2342 | }
2343 | }
2344 | }
2345 | 
2346 | dev_warn(adev->dev, "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",printf("drm:pid%d:%s *WARNING* " "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , log_buf )
2347 | log_buf)printf("drm:pid%d:%s *WARNING* " "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , log_buf );
/* Translate the ASIC-specific status bits into the common format
 * expected by KFD's SMI throttle event. */
2348 | kgd2kfd_smi_event_throttle(smu->adev->kfd.dev,
2349 | smu_cmn_get_indep_throttler_status(throttler_status,
2350 | arcturus_throttler_map));
2351 | }
2352 | |
/*
 * arcturus_get_current_pcie_link_speed - report the current link speed.
 *
 * Reads the PCIE_ESM_CTRL register first: when the ESM (Extended Speed
 * Mode) status field (bits checked below) is non-zero, the speed is
 * taken from the register's rate field with a +128 offset encoding;
 * otherwise falls back to the generic smu_v11_0 helper.
 * NOTE(review): the "TODO: confirm this on real target" above the read
 * suggests the bit layout has not been validated on hardware.
 */
2353 | static uint16_t arcturus_get_current_pcie_link_speed(struct smu_context *smu)
2354 | {
2355 | struct amdgpu_device *adev = smu->adev;
2356 | uint32_t esm_ctrl;
2357 | 
2358 | /* TODO: confirm this on real target */
2359 | esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL)adev->pcie_rreg(adev, (0x111003D0));
2360 | if ((esm_ctrl >> 15) & 0x1FFFF)
2361 | return (uint16_t)(((esm_ctrl >> 8) & 0x3F) + 128);
2362 | 
2363 | return smu_v11_0_get_current_pcie_link_speed(smu);
2364 | }
2365 | |
/*
 * arcturus_get_gpu_metrics - fill and expose the gpu_metrics_v1_3 table.
 *
 * @smu:   SMU context
 * @table: out-pointer; on success set to the driver-owned
 *         smu_table->gpu_metrics_table (caller must not free it)
 *
 * Fetches a fresh SmuMetrics_t snapshot (bypass_cache = true), copies
 * temperatures, activities, power/energy, average and current clocks,
 * throttler status (raw and driver-independent), fan speed, and PCIe
 * link width/speed into the common v1.3 metrics layout, then stamps it
 * with the boottime clock.  Returns the table size on success or a
 * negative errno if the metrics read fails.
 */
2366 | static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
2367 | void **table)
2368 | {
2369 | struct smu_table_context *smu_table = &smu->smu_table;
2370 | struct gpu_metrics_v1_3 *gpu_metrics =
2371 | (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
2372 | SmuMetrics_t metrics;
2373 | int ret = 0;
2374 | 
2375 | ret = smu_cmn_get_metrics_table(smu,
2376 | &metrics,
2377 | true1);
2378 | if (ret)
2379 | return ret;
2380 | 
2381 | smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
2382 | 
2383 | gpu_metrics->temperature_edge = metrics.TemperatureEdge;
2384 | gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
2385 | gpu_metrics->temperature_mem = metrics.TemperatureHBM;
2386 | gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
2387 | gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
2388 | gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;
2389 | 
2390 | gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
2391 | gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
2392 | gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
2393 | 
2394 | gpu_metrics->average_socket_power = metrics.AverageSocketPower;
2395 | gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
2396 | 
2397 | gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
2398 | gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
2399 | gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
2400 | gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency;
2401 | gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency;
2402 | 
2403 | gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
2404 | gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
2405 | gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
2406 | gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
2407 | gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
2408 | 
2409 | gpu_metrics->throttle_status = metrics.ThrottlerStatus;
2410 | gpu_metrics->indep_throttle_status =
2411 | smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
2412 | arcturus_throttler_map);
2413 | 
2414 | gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
2415 | 
2416 | gpu_metrics->pcie_link_width =
2417 | smu_v11_0_get_current_pcie_link_width(smu);
2418 | gpu_metrics->pcie_link_speed =
2419 | arcturus_get_current_pcie_link_speed(smu);
2420 | 
2421 | gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
2422 | 
2423 | *table = (void *)gpu_metrics;
2424 | 
2425 | return sizeof(struct gpu_metrics_v1_3);
2426 | }
2427 | |
/*
 * Arcturus implementation of the swsmu pptable_funcs vtable.
 * ASIC-specific behavior uses the arcturus_* handlers defined in this
 * file; everything else is delegated to the shared smu_v11_0_* /
 * smu_cmn_* helpers.  Entries set to NULL are features Arcturus does
 * not implement.  Installed by arcturus_set_ppt_funcs().
 */
2428 | static const struct pptable_funcs arcturus_ppt_funcs = {
2429 | /* init dpm */
2430 | .get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
2431 | /* btc */
2432 | .run_btc = arcturus_run_btc,
2433 | /* dpm/clk tables */
2434 | .set_default_dpm_table = arcturus_set_default_dpm_table,
2435 | .populate_umd_state_clk = arcturus_populate_umd_state_clk,
2436 | .get_thermal_temperature_range = arcturus_get_thermal_temperature_range,
2437 | .print_clk_levels = arcturus_print_clk_levels,
2438 | .force_clk_levels = arcturus_force_clk_levels,
2439 | .read_sensor = arcturus_read_sensor,
2440 | .get_fan_speed_pwm = arcturus_get_fan_speed_pwm,
2441 | .get_fan_speed_rpm = arcturus_get_fan_speed_rpm,
2442 | .get_power_profile_mode = arcturus_get_power_profile_mode,
2443 | .set_power_profile_mode = arcturus_set_power_profile_mode,
2444 | .set_performance_level = arcturus_set_performance_level,
2445 | /* debug (internal used) */
2446 | .dump_pptable = arcturus_dump_pptable,
2447 | .get_power_limit = arcturus_get_power_limit,
2448 | .is_dpm_running = arcturus_is_dpm_running,
2449 | .dpm_set_vcn_enable = arcturus_dpm_set_vcn_enable,
2450 | .i2c_init = arcturus_i2c_control_init,
2451 | .i2c_fini = arcturus_i2c_control_fini,
2452 | .get_unique_id = arcturus_get_unique_id,
2453 | .init_microcode = smu_v11_0_init_microcode,
2454 | .load_microcode = smu_v11_0_load_microcode,
2455 | .fini_microcode = smu_v11_0_fini_microcode,
2456 | .init_smc_tables = arcturus_init_smc_tables,
2457 | .fini_smc_tables = smu_v11_0_fini_smc_tables,
2458 | .init_power = smu_v11_0_init_power,
2459 | .fini_power = smu_v11_0_fini_power,
2460 | .check_fw_status = smu_v11_0_check_fw_status,
2461 | /* pptable related */
2462 | .setup_pptable = arcturus_setup_pptable,
2463 | .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
2464 | .check_fw_version = smu_v11_0_check_fw_version,
2465 | .write_pptable = smu_cmn_write_pptable,
2466 | .set_driver_table_location = smu_v11_0_set_driver_table_location,
2467 | .set_tool_table_location = smu_v11_0_set_tool_table_location,
2468 | .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
2469 | .system_features_control = smu_v11_0_system_features_control,
2470 | .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
2471 | .send_smc_msg = smu_cmn_send_smc_msg,
2472 | .init_display_count = NULL((void *)0),
2473 | .set_allowed_mask = smu_v11_0_set_allowed_mask,
2474 | .get_enabled_mask = smu_cmn_get_enabled_mask,
2475 | .feature_is_enabled = smu_cmn_feature_is_enabled,
2476 | .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
2477 | .notify_display_change = NULL((void *)0),
2478 | .set_power_limit = smu_v11_0_set_power_limit,
2479 | .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
2480 | .enable_thermal_alert = smu_v11_0_enable_thermal_alert,
2481 | .disable_thermal_alert = smu_v11_0_disable_thermal_alert,
2482 | .set_min_dcef_deep_sleep = NULL((void *)0),
2483 | .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
2484 | .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
2485 | .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
2486 | .set_fan_speed_pwm = arcturus_set_fan_speed_pwm,
2487 | .set_fan_speed_rpm = arcturus_set_fan_speed_rpm,
2488 | .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
2489 | .gfx_off_control = smu_v11_0_gfx_off_control,
2490 | .register_irq_handler = smu_v11_0_register_irq_handler,
2491 | .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
2492 | .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
2493 | .baco_is_support = smu_v11_0_baco_is_support,
2494 | .baco_get_state = smu_v11_0_baco_get_state,
2495 | .baco_set_state = smu_v11_0_baco_set_state,
2496 | .baco_enter = smu_v11_0_baco_enter,
2497 | .baco_exit = smu_v11_0_baco_exit,
2498 | .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
2499 | .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
2500 | .set_df_cstate = arcturus_set_df_cstate,
2501 | .allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
2502 | .log_thermal_throttling_event = arcturus_log_thermal_throttling_event,
2503 | .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
2504 | .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
2505 | .get_gpu_metrics = arcturus_get_gpu_metrics,
2506 | .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
2507 | .deep_sleep_control = smu_v11_0_deep_sleep_control,
2508 | .get_fan_parameters = arcturus_get_fan_parameters,
2509 | .interrupt_work = smu_v11_0_interrupt_work,
2510 | .smu_handle_passthrough_sbr = smu_v11_0_handle_passthrough_sbr,
2511 | .set_mp1_state = smu_cmn_set_mp1_state,
2512 | };
2513 | |
/*
 * arcturus_set_ppt_funcs - install the Arcturus powerplay backend.
 *
 * Wires the smu_context to the Arcturus vtable and to the per-ASIC
 * message/clock/feature/table/power-source/workload translation maps,
 * then sets up the smu v11 mailbox registers.  Called once during SMU
 * early init for Arcturus parts.
 */
2514 | void arcturus_set_ppt_funcs(struct smu_context *smu)
2515 | {
2516 | smu->ppt_funcs = &arcturus_ppt_funcs;
2517 | smu->message_map = arcturus_message_map;
2518 | smu->clock_map = arcturus_clk_map;
2519 | smu->feature_map = arcturus_feature_mask_map;
2520 | smu->table_map = arcturus_table_map;
2521 | smu->pwr_src_map = arcturus_pwr_src_map;
2522 | smu->workload_map = arcturus_workload_map;
2523 | smu_v11_0_set_smu_mailbox_registers(smu);
2524 | }