/*
 * File: dev/pci/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
 * Analyzer finding: line 697, column 20 — value stored to 'skutable'
 * during its initialization is never read.
 */
1 | /* |
2 | * Copyright 2021 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #define SWSMU_CODE_LAYER_L2 |
25 | |
26 | #include <linux/firmware.h> |
27 | #include <linux/pci.h> |
28 | #include <linux/i2c.h> |
29 | #include "amdgpu.h" |
30 | #include "amdgpu_smu.h" |
31 | #include "atomfirmware.h" |
32 | #include "amdgpu_atomfirmware.h" |
33 | #include "amdgpu_atombios.h" |
34 | #include "smu_v13_0.h" |
35 | #include "smu13_driver_if_v13_0_7.h" |
36 | #include "soc15_common.h" |
37 | #include "atom.h" |
38 | #include "smu_v13_0_7_ppt.h" |
39 | #include "smu_v13_0_7_pptable.h" |
40 | #include "smu_v13_0_7_ppsmc.h" |
41 | #include "nbio/nbio_4_3_0_offset.h" |
42 | #include "nbio/nbio_4_3_0_sh_mask.h" |
43 | #include "mp/mp_13_0_0_offset.h" |
44 | #include "mp/mp_13_0_0_sh_mask.h" |
45 | |
46 | #include "asic_reg/mp/mp_13_0_0_sh_mask.h" |
47 | #include "smu_cmn.h" |
48 | #include "amdgpu_ras.h" |
49 | |
50 | /* |
51 | * DO NOT use these for err/warn/info/debug messages. |
52 | * Use dev_err, dev_warn, dev_info and dev_dbg instead. |
53 | * They are more MGPU friendly. |
54 | */ |
55 | #undef pr_err |
56 | #undef pr_warn |
57 | #undef pr_info |
58 | #undef pr_debug |
59 | |
/* Recover the i2c adapter's owning amdgpu_device from its embedded smu_i2c member. */
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

/* Single-bit mask for a SMU feature number (features live in a 64-bit mask). */
#define FEATURE_MASK(feature) (1ULL << feature)

/* All clock-domain DPM features driven by the SMC. */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))

/* MP1 firmware-flags register offset (SMN space) for SMU v13.0.7. */
#define smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 0x3b10028

/* Size of the MP0/MP1 shared region holding the combo pptable. */
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE	0x4000
74 | |
75 | static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = { |
76 | MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1)[SMU_MSG_TestMessage] = {1, (0x1), (1)}, |
77 | MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1)[SMU_MSG_GetSmuVersion] = {1, (0x2), (1)}, |
78 | MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1)[SMU_MSG_GetDriverIfVersion] = {1, (0x3), (1)}, |
79 | MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0)[SMU_MSG_SetAllowedFeaturesMaskLow] = {1, (0x4), (0)}, |
80 | MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0)[SMU_MSG_SetAllowedFeaturesMaskHigh] = {1, (0x5), (0)}, |
81 | MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0)[SMU_MSG_EnableAllSmuFeatures] = {1, (0x6), (0)}, |
82 | MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0)[SMU_MSG_DisableAllSmuFeatures] = {1, (0x7), (0)}, |
83 | MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1)[SMU_MSG_EnableSmuFeaturesLow] = {1, (0x8), (1)}, |
84 | MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1)[SMU_MSG_EnableSmuFeaturesHigh] = {1, (0x9), (1)}, |
85 | MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1)[SMU_MSG_DisableSmuFeaturesLow] = {1, (0xA), (1)}, |
86 | MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1)[SMU_MSG_DisableSmuFeaturesHigh] = {1, (0xB), (1)}, |
87 | MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1)[SMU_MSG_GetEnabledSmuFeaturesLow] = {1, (0xC), (1)}, |
88 | MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1)[SMU_MSG_GetEnabledSmuFeaturesHigh] = {1, (0xD), (1)}, |
89 | MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1)[SMU_MSG_SetWorkloadMask] = {1, (0x24), (1)}, |
90 | MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0)[SMU_MSG_SetPptLimit] = {1, (0x32), (0)}, |
91 | MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1)[SMU_MSG_SetDriverDramAddrHigh] = {1, (0xE), (1)}, |
92 | MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1)[SMU_MSG_SetDriverDramAddrLow] = {1, (0xF), (1)}, |
93 | MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0)[SMU_MSG_SetToolsDramAddrHigh] = {1, (0x10), (0)}, |
94 | MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0)[SMU_MSG_SetToolsDramAddrLow] = {1, (0x11), (0)}, |
95 | MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1)[SMU_MSG_TransferTableSmu2Dram] = {1, (0x12), (1)}, |
96 | MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0)[SMU_MSG_TransferTableDram2Smu] = {1, (0x13), (0)}, |
97 | MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0)[SMU_MSG_UseDefaultPPTable] = {1, (0x14), (0)}, |
98 | MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0)[SMU_MSG_RunDcBtc] = {1, (0x36), (0)}, |
99 | MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0)[SMU_MSG_EnterBaco] = {1, (0x15), (0)}, |
100 | MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0)[SMU_MSG_ExitBaco] = {1, (0x16), (0)}, |
101 | MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1)[SMU_MSG_SetSoftMinByFreq] = {1, (0x19), (1)}, |
102 | MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1)[SMU_MSG_SetSoftMaxByFreq] = {1, (0x1A), (1)}, |
103 | MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1)[SMU_MSG_SetHardMinByFreq] = {1, (0x1B), (1)}, |
104 | MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0)[SMU_MSG_SetHardMaxByFreq] = {1, (0x1C), (0)}, |
105 | MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1)[SMU_MSG_GetMinDpmFreq] = {1, (0x1D), (1)}, |
106 | MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1)[SMU_MSG_GetMaxDpmFreq] = {1, (0x1E), (1)}, |
107 | MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1)[SMU_MSG_GetDpmFreqByIndex] = {1, (0x1F), (1)}, |
108 | MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0)[SMU_MSG_PowerUpVcn] = {1, (0x2A), (0)}, |
109 | MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0)[SMU_MSG_PowerDownVcn] = {1, (0x2B), (0)}, |
110 | MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0)[SMU_MSG_PowerUpJpeg] = {1, (0x2C), (0)}, |
111 | MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0)[SMU_MSG_PowerDownJpeg] = {1, (0x2D), (0)}, |
112 | MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1)[SMU_MSG_GetDcModeMaxDpmFreq] = {1, (0x27), (1)}, |
113 | MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0)[SMU_MSG_OverridePcieParameters] = {1, (0x20), (0)}, |
114 | MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0)[SMU_MSG_ReenableAcDcInterrupt] = {1, (0x34), (0)}, |
115 | MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0)[SMU_MSG_AllowIHHostInterrupt] = {1, (0x4C), (0)}, |
116 | MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0)[SMU_MSG_DramLogSetDramAddrHigh] = {1, (0x21), (0)}, |
117 | MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0)[SMU_MSG_DramLogSetDramAddrLow] = {1, (0x22), (0)}, |
118 | MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0)[SMU_MSG_DramLogSetDramSize] = {1, (0x23), (0)}, |
119 | MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0)[SMU_MSG_AllowGfxOff] = {1, (0x28), (0)}, |
120 | MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0)[SMU_MSG_DisallowGfxOff] = {1, (0x29), (0)}, |
121 | MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0)[SMU_MSG_Mode1Reset] = {1, (0x2F), (0)}, |
122 | MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0)[SMU_MSG_PrepareMp1ForUnload] = {1, (0x2E), (0)}, |
123 | MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0)[SMU_MSG_SetMGpuFanBoostLimitRpm] = {1, (0x3C), (0)}, |
124 | MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0)[SMU_MSG_DFCstateControl] = {1, (0x3B), (0)}, |
125 | MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0)[SMU_MSG_ArmD3] = {1, (0x17), (0)}, |
126 | MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0)[SMU_MSG_AllowGpo] = {1, (0x41), (0)}, |
127 | MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0)[SMU_MSG_GetPptLimit] = {1, (0x33), (0)}, |
128 | MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0)[SMU_MSG_NotifyPowerSource] = {1, (0x35), (0)}, |
129 | }; |
130 | |
131 | static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { |
132 | CLK_MAP(GFXCLK, PPCLK_GFXCLK)[SMU_GFXCLK] = {1, (PPCLK_GFXCLK)}, |
133 | CLK_MAP(SCLK, PPCLK_GFXCLK)[SMU_SCLK] = {1, (PPCLK_GFXCLK)}, |
134 | CLK_MAP(SOCCLK, PPCLK_SOCCLK)[SMU_SOCCLK] = {1, (PPCLK_SOCCLK)}, |
135 | CLK_MAP(FCLK, PPCLK_FCLK)[SMU_FCLK] = {1, (PPCLK_FCLK)}, |
136 | CLK_MAP(UCLK, PPCLK_UCLK)[SMU_UCLK] = {1, (PPCLK_UCLK)}, |
137 | CLK_MAP(MCLK, PPCLK_UCLK)[SMU_MCLK] = {1, (PPCLK_UCLK)}, |
138 | CLK_MAP(VCLK, PPCLK_VCLK_0)[SMU_VCLK] = {1, (PPCLK_VCLK_0)}, |
139 | CLK_MAP(VCLK1, PPCLK_VCLK_1)[SMU_VCLK1] = {1, (PPCLK_VCLK_1)}, |
140 | CLK_MAP(DCLK, PPCLK_DCLK_0)[SMU_DCLK] = {1, (PPCLK_DCLK_0)}, |
141 | CLK_MAP(DCLK1, PPCLK_DCLK_1)[SMU_DCLK1] = {1, (PPCLK_DCLK_1)}, |
142 | }; |
143 | |
144 | static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = { |
145 | FEA_MAP(FW_DATA_READ)[SMU_FEATURE_FW_DATA_READ_BIT] = {1, 0}, |
146 | FEA_MAP(DPM_GFXCLK)[SMU_FEATURE_DPM_GFXCLK_BIT] = {1, 1}, |
147 | FEA_MAP(DPM_GFX_POWER_OPTIMIZER)[SMU_FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT] = {1, 2}, |
148 | FEA_MAP(DPM_UCLK)[SMU_FEATURE_DPM_UCLK_BIT] = {1, 3}, |
149 | FEA_MAP(DPM_FCLK)[SMU_FEATURE_DPM_FCLK_BIT] = {1, 4}, |
150 | FEA_MAP(DPM_SOCCLK)[SMU_FEATURE_DPM_SOCCLK_BIT] = {1, 5}, |
151 | FEA_MAP(DPM_MP0CLK)[SMU_FEATURE_DPM_MP0CLK_BIT] = {1, 6}, |
152 | FEA_MAP(DPM_LINK)[SMU_FEATURE_DPM_LINK_BIT] = {1, 7}, |
153 | FEA_MAP(DPM_DCN)[SMU_FEATURE_DPM_DCN_BIT] = {1, 8}, |
154 | FEA_MAP(VMEMP_SCALING)[SMU_FEATURE_VMEMP_SCALING_BIT] = {1, 9}, |
155 | FEA_MAP(VDDIO_MEM_SCALING)[SMU_FEATURE_VDDIO_MEM_SCALING_BIT] = {1, 10}, |
156 | FEA_MAP(DS_GFXCLK)[SMU_FEATURE_DS_GFXCLK_BIT] = {1, 11}, |
157 | FEA_MAP(DS_SOCCLK)[SMU_FEATURE_DS_SOCCLK_BIT] = {1, 12}, |
158 | FEA_MAP(DS_FCLK)[SMU_FEATURE_DS_FCLK_BIT] = {1, 13}, |
159 | FEA_MAP(DS_LCLK)[SMU_FEATURE_DS_LCLK_BIT] = {1, 14}, |
160 | FEA_MAP(DS_DCFCLK)[SMU_FEATURE_DS_DCFCLK_BIT] = {1, 15}, |
161 | FEA_MAP(DS_UCLK)[SMU_FEATURE_DS_UCLK_BIT] = {1, 16}, |
162 | FEA_MAP(GFX_ULV)[SMU_FEATURE_GFX_ULV_BIT] = {1, 17}, |
163 | FEA_MAP(FW_DSTATE)[SMU_FEATURE_FW_DSTATE_BIT] = {1, 18}, |
164 | FEA_MAP(GFXOFF)[SMU_FEATURE_GFXOFF_BIT] = {1, 19}, |
165 | FEA_MAP(BACO)[SMU_FEATURE_BACO_BIT] = {1, 20}, |
166 | FEA_MAP(MM_DPM)[SMU_FEATURE_MM_DPM_BIT] = {1, 21}, |
167 | FEA_MAP(SOC_MPCLK_DS)[SMU_FEATURE_SOC_MPCLK_DS_BIT] = {1, 22}, |
168 | FEA_MAP(BACO_MPCLK_DS)[SMU_FEATURE_BACO_MPCLK_DS_BIT] = {1, 23}, |
169 | FEA_MAP(THROTTLERS)[SMU_FEATURE_THROTTLERS_BIT] = {1, 24}, |
170 | FEA_MAP(SMARTSHIFT)[SMU_FEATURE_SMARTSHIFT_BIT] = {1, 25}, |
171 | FEA_MAP(GTHR)[SMU_FEATURE_GTHR_BIT] = {1, 26}, |
172 | FEA_MAP(ACDC)[SMU_FEATURE_ACDC_BIT] = {1, 27}, |
173 | FEA_MAP(VR0HOT)[SMU_FEATURE_VR0HOT_BIT] = {1, 28}, |
174 | FEA_MAP(FW_CTF)[SMU_FEATURE_FW_CTF_BIT] = {1, 29}, |
175 | FEA_MAP(FAN_CONTROL)[SMU_FEATURE_FAN_CONTROL_BIT] = {1, 30}, |
176 | FEA_MAP(GFX_DCS)[SMU_FEATURE_GFX_DCS_BIT] = {1, 31}, |
177 | FEA_MAP(GFX_READ_MARGIN)[SMU_FEATURE_GFX_READ_MARGIN_BIT] = {1, 32}, |
178 | FEA_MAP(LED_DISPLAY)[SMU_FEATURE_LED_DISPLAY_BIT] = {1, 33}, |
179 | FEA_MAP(GFXCLK_SPREAD_SPECTRUM)[SMU_FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT] = {1, 34}, |
180 | FEA_MAP(OUT_OF_BAND_MONITOR)[SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT] = {1, 35}, |
181 | FEA_MAP(OPTIMIZED_VMIN)[SMU_FEATURE_OPTIMIZED_VMIN_BIT] = {1, 36}, |
182 | FEA_MAP(GFX_IMU)[SMU_FEATURE_GFX_IMU_BIT] = {1, 37}, |
183 | FEA_MAP(BOOT_TIME_CAL)[SMU_FEATURE_BOOT_TIME_CAL_BIT] = {1, 38}, |
184 | FEA_MAP(GFX_PCC_DFLL)[SMU_FEATURE_GFX_PCC_DFLL_BIT] = {1, 39}, |
185 | FEA_MAP(SOC_CG)[SMU_FEATURE_SOC_CG_BIT] = {1, 40}, |
186 | FEA_MAP(DF_CSTATE)[SMU_FEATURE_DF_CSTATE_BIT] = {1, 41}, |
187 | FEA_MAP(GFX_EDC)[SMU_FEATURE_GFX_EDC_BIT] = {1, 42}, |
188 | FEA_MAP(BOOT_POWER_OPT)[SMU_FEATURE_BOOT_POWER_OPT_BIT] = {1, 43}, |
189 | FEA_MAP(CLOCK_POWER_DOWN_BYPASS)[SMU_FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT] = {1, 44}, |
190 | FEA_MAP(DS_VCN)[SMU_FEATURE_DS_VCN_BIT] = {1, 45}, |
191 | FEA_MAP(BACO_CG)[SMU_FEATURE_BACO_CG_BIT] = {1, 46}, |
192 | FEA_MAP(MEM_TEMP_READ)[SMU_FEATURE_MEM_TEMP_READ_BIT] = {1, 47}, |
193 | FEA_MAP(ATHUB_MMHUB_PG)[SMU_FEATURE_ATHUB_MMHUB_PG_BIT] = {1, 48}, |
194 | FEA_MAP(SOC_PCC)[SMU_FEATURE_SOC_PCC_BIT] = {1, 49}, |
195 | [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT21}, |
196 | [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT21}, |
197 | [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT24}, |
198 | }; |
199 | |
200 | static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = { |
201 | TAB_MAP(PPTABLE)[SMU_TABLE_PPTABLE] = {1, 0}, |
202 | TAB_MAP(WATERMARKS)[SMU_TABLE_WATERMARKS] = {1, 2}, |
203 | TAB_MAP(AVFS_PSM_DEBUG)[SMU_TABLE_AVFS_PSM_DEBUG] = {1, 3}, |
204 | TAB_MAP(PMSTATUSLOG)[SMU_TABLE_PMSTATUSLOG] = {1, 4}, |
205 | TAB_MAP(SMU_METRICS)[SMU_TABLE_SMU_METRICS] = {1, 5}, |
206 | TAB_MAP(DRIVER_SMU_CONFIG)[SMU_TABLE_DRIVER_SMU_CONFIG] = {1, 6}, |
207 | TAB_MAP(ACTIVITY_MONITOR_COEFF)[SMU_TABLE_ACTIVITY_MONITOR_COEFF] = {1, 7}, |
208 | [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE1}, |
209 | }; |
210 | |
211 | static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { |
212 | PWR_MAP(AC)[SMU_POWER_SOURCE_AC] = {1, POWER_SOURCE_AC}, |
213 | PWR_MAP(DC)[SMU_POWER_SOURCE_DC] = {1, POWER_SOURCE_DC}, |
214 | }; |
215 | |
216 | static struct cmn2asic_mapping smu_v13_0_7_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { |
217 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT)[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = {1, (0)}, |
218 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT)[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = {1, (1)}, |
219 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT)[PP_SMC_POWER_PROFILE_POWERSAVING] = {1, (2)}, |
220 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT)[PP_SMC_POWER_PROFILE_VIDEO] = {1, (3)}, |
221 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT)[PP_SMC_POWER_PROFILE_VR] = {1, (4)}, |
222 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT)[PP_SMC_POWER_PROFILE_COMPUTE] = {1, (5)}, |
223 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT)[PP_SMC_POWER_PROFILE_CUSTOM] = {1, (6)}, |
224 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT)[PP_SMC_POWER_PROFILE_WINDOW3D] = {1, (7)}, |
225 | }; |
226 | |
227 | static const uint8_t smu_v13_0_7_throttler_map[] = { |
228 | [THROTTLER_PPT0_BIT16] = (SMU_THROTTLER_PPT0_BIT0), |
229 | [THROTTLER_PPT1_BIT17] = (SMU_THROTTLER_PPT1_BIT1), |
230 | [THROTTLER_PPT2_BIT18] = (SMU_THROTTLER_PPT2_BIT2), |
231 | [THROTTLER_PPT3_BIT19] = (SMU_THROTTLER_PPT3_BIT3), |
232 | [THROTTLER_TDC_GFX_BIT13] = (SMU_THROTTLER_TDC_GFX_BIT16), |
233 | [THROTTLER_TDC_SOC_BIT14] = (SMU_THROTTLER_TDC_SOC_BIT17), |
234 | [THROTTLER_TEMP_EDGE_BIT0] = (SMU_THROTTLER_TEMP_EDGE_BIT35), |
235 | [THROTTLER_TEMP_HOTSPOT_BIT1] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT36), |
236 | [THROTTLER_TEMP_MEM_BIT4] = (SMU_THROTTLER_TEMP_MEM_BIT34), |
237 | [THROTTLER_TEMP_VR_GFX_BIT5] = (SMU_THROTTLER_TEMP_VR_GFX_BIT38), |
238 | [THROTTLER_TEMP_VR_SOC_BIT8] = (SMU_THROTTLER_TEMP_VR_SOC_BIT39), |
239 | [THROTTLER_TEMP_VR_MEM0_BIT6] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT40), |
240 | [THROTTLER_TEMP_VR_MEM1_BIT7] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT41), |
241 | [THROTTLER_TEMP_LIQUID0_BIT10] = (SMU_THROTTLER_TEMP_LIQUID0_BIT42), |
242 | [THROTTLER_TEMP_LIQUID1_BIT11] = (SMU_THROTTLER_TEMP_LIQUID1_BIT43), |
243 | [THROTTLER_GFX_APCC_PLUS_BIT21] = (SMU_THROTTLER_APCC_BIT23), |
244 | [THROTTLER_FIT_BIT20] = (SMU_THROTTLER_FIT_BIT57), |
245 | }; |
246 | |
247 | static int |
248 | smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu, |
249 | uint32_t *feature_mask, uint32_t num) |
250 | { |
251 | struct amdgpu_device *adev = smu->adev; |
252 | |
253 | if (num > 2) |
254 | return -EINVAL22; |
255 | |
256 | memset(feature_mask, 0, sizeof(uint32_t) * num)__builtin_memset((feature_mask), (0), (sizeof(uint32_t) * num )); |
257 | |
258 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT)(1ULL << 0); |
259 | |
260 | if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) { |
261 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)(1ULL << 1); |
262 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT)(1ULL << 37); |
263 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT)(1ULL << 2); |
264 | } |
265 | |
266 | if (adev->pm.pp_feature & PP_GFXOFF_MASK) |
267 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT)(1ULL << 19); |
268 | |
269 | if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) { |
270 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)(1ULL << 3); |
271 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT)(1ULL << 4); |
272 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT)(1ULL << 9); |
273 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT)(1ULL << 10); |
274 | } |
275 | |
276 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)(1ULL << 5); |
277 | |
278 | if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) |
279 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT)(1ULL << 7); |
280 | |
281 | if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) |
282 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT)(1ULL << 11); |
283 | |
284 | if (adev->pm.pp_feature & PP_ULV_MASK) |
285 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT)(1ULL << 17); |
286 | |
287 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT)(1ULL << 14); |
288 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)(1ULL << 6); |
289 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT)(1ULL << 21); |
290 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT)(1ULL << 45); |
291 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT)(1ULL << 13); |
292 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT)(1ULL << 41); |
293 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT)(1ULL << 24); |
294 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT)(1ULL << 28); |
295 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT)(1ULL << 29); |
296 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)(1ULL << 30); |
297 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)(1ULL << 12); |
298 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT)(1ULL << 34); |
299 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT)(1ULL << 47); |
300 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT)(1ULL << 18); |
301 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT)(1ULL << 22); |
302 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT)(1ULL << 23); |
303 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_PCC_DFLL_BIT)(1ULL << 39); |
304 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT)(1ULL << 40); |
305 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT)(1ULL << 20); |
306 | |
307 | if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK) |
308 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT)(1ULL << 8); |
309 | |
310 | if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB(1 << 16)) && |
311 | (adev->pg_flags & AMD_PG_SUPPORT_MMHUB(1 << 13))) |
312 | *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT)(1ULL << 48); |
313 | |
314 | return 0; |
315 | } |
316 | |
317 | static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu) |
318 | { |
319 | struct smu_table_context *table_context = &smu->smu_table; |
320 | struct smu_13_0_7_powerplay_table *powerplay_table = |
321 | table_context->power_play_table; |
322 | struct smu_baco_context *smu_baco = &smu->smu_baco; |
323 | PPTable_t *smc_pptable = table_context->driver_pptable; |
324 | BoardTable_t *BoardTable = &smc_pptable->BoardTable; |
325 | |
326 | if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC0x4) |
327 | smu->dc_controlled_by_gpio = true1; |
328 | |
329 | if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO0x8) { |
330 | smu_baco->platform_support = true1; |
331 | |
332 | if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO0x10) |
333 | && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled)) |
334 | smu_baco->maco_support = true1; |
335 | } |
336 | |
337 | table_context->thermal_controller_type = |
338 | powerplay_table->thermal_controller_type; |
339 | |
340 | /* |
341 | * Instead of having its own buffer space and get overdrive_table copied, |
342 | * smu->od_settings just points to the actual overdrive_table |
343 | */ |
344 | smu->od_settings = &powerplay_table->overdrive_table; |
345 | |
346 | return 0; |
347 | } |
348 | |
349 | static int smu_v13_0_7_store_powerplay_table(struct smu_context *smu) |
350 | { |
351 | struct smu_table_context *table_context = &smu->smu_table; |
352 | struct smu_13_0_7_powerplay_table *powerplay_table = |
353 | table_context->power_play_table; |
354 | struct amdgpu_device *adev = smu->adev; |
355 | |
356 | if (adev->pdev->device == 0x51) |
357 | powerplay_table->smc_pptable.SkuTable.DebugOverrides |= 0x00000080; |
358 | |
359 | memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,__builtin_memcpy((table_context->driver_pptable), (&powerplay_table ->smc_pptable), (sizeof(PPTable_t))) |
360 | sizeof(PPTable_t))__builtin_memcpy((table_context->driver_pptable), (&powerplay_table ->smc_pptable), (sizeof(PPTable_t))); |
361 | |
362 | return 0; |
363 | } |
364 | |
365 | static int smu_v13_0_7_check_fw_status(struct smu_context *smu) |
366 | { |
367 | struct amdgpu_device *adev = smu->adev; |
368 | uint32_t mp1_fw_flags; |
369 | |
370 | mp1_fw_flags = RREG32_PCIE(MP1_Public |adev->pcie_rreg(adev, (0x03b00000 | (0x3b10028 & 0xffffffff ))) |
371 | (smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 & 0xffffffff))adev->pcie_rreg(adev, (0x03b00000 | (0x3b10028 & 0xffffffff ))); |
372 | |
373 | if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK0x00000001L) >> |
374 | MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT0x0) |
375 | return 0; |
376 | |
377 | return -EIO5; |
378 | } |
379 | |
/*
 * Layout of the smc_dpm_info ATOM data table for SMU v13.0.7: a common
 * ATOM header followed by the board table consumed by
 * smu_v13_0_7_append_powerplay_table().
 * NOTE(review): the #ifndef guards a struct tag, which only takes effect
 * if some header defines a macro of the same name — confirm intent.
 */
#ifndef atom_smc_dpm_info_table_13_0_7
struct atom_smc_dpm_info_table_13_0_7
{
	struct atom_common_table_header table_header;
	BoardTable_t BoardTable;
};
#endif
387 | |
388 | static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu) |
389 | { |
390 | struct smu_table_context *table_context = &smu->smu_table; |
391 | |
392 | PPTable_t *smc_pptable = table_context->driver_pptable; |
393 | |
394 | struct atom_smc_dpm_info_table_13_0_7 *smc_dpm_table; |
395 | |
396 | BoardTable_t *BoardTable = &smc_pptable->BoardTable; |
397 | |
398 | int index, ret; |
399 | |
400 | index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,(__builtin_offsetof(struct atom_master_list_of_data_tables_v2_1 , smc_dpm_info) / sizeof(uint16_t)) |
401 | smc_dpm_info)(__builtin_offsetof(struct atom_master_list_of_data_tables_v2_1 , smc_dpm_info) / sizeof(uint16_t)); |
402 | |
403 | ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL((void *)0), NULL((void *)0), NULL((void *)0), |
404 | (uint8_t **)&smc_dpm_table); |
405 | if (ret) |
406 | return ret; |
407 | |
408 | memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t))__builtin_memcpy((BoardTable), (&smc_dpm_table->BoardTable ), (sizeof(BoardTable_t))); |
409 | |
410 | return 0; |
411 | } |
412 | |
413 | static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu, |
414 | void **table, |
415 | uint32_t *size) |
416 | { |
417 | struct smu_table_context *smu_table = &smu->smu_table; |
418 | void *combo_pptable = smu_table->combo_pptable; |
419 | int ret = 0; |
420 | |
421 | ret = smu_cmn_get_combo_pptable(smu); |
422 | if (ret) |
423 | return ret; |
424 | |
425 | *table = combo_pptable; |
426 | *size = sizeof(struct smu_13_0_7_powerplay_table); |
427 | |
428 | return 0; |
429 | } |
430 | |
/*
 * Set up the powerplay table: fetch the raw table from PMFW, store it into
 * the driver pptable, append board settings (unless PSP/SCPM handles that),
 * then validate/latch the platform capabilities.
 */
static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/*
	 * With SCPM enabled, the pptable used will be signed. It cannot
	 * be used directly by driver. To get the raw pptable, we need to
	 * rely on the combo pptable (and its relevant SMU message).
	 */
	ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
						&smu_table->power_play_table,
						&smu_table->power_play_table_size);
	if (ret)
		return ret;

	ret = smu_v13_0_7_store_powerplay_table(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, the operation below will be handled
	 * by PSP. Driver involvement is unnecessary and useless.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_v13_0_7_append_powerplay_table(smu);
		if (ret)
			return ret;
	}

	ret = smu_v13_0_7_check_powerplay_table(smu);
	if (ret)
		return ret;

	return ret;
}
468 | |
469 | static int smu_v13_0_7_tables_init(struct smu_context *smu) |
470 | { |
471 | struct smu_table_context *smu_table = &smu->smu_table; |
472 | struct smu_table *tables = smu_table->tables; |
473 | |
474 | SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),do { tables[SMU_TABLE_PPTABLE].size = sizeof(PPTable_t); tables [SMU_TABLE_PPTABLE].align = (1 << 12); tables[SMU_TABLE_PPTABLE ].domain = 0x4; } while (0) |
475 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_PPTABLE].size = sizeof(PPTable_t); tables [SMU_TABLE_PPTABLE].align = (1 << 12); tables[SMU_TABLE_PPTABLE ].domain = 0x4; } while (0); |
476 | |
477 | SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),do { tables[SMU_TABLE_WATERMARKS].size = sizeof(Watermarks_t) ; tables[SMU_TABLE_WATERMARKS].align = (1 << 12); tables [SMU_TABLE_WATERMARKS].domain = 0x4; } while (0) |
478 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_WATERMARKS].size = sizeof(Watermarks_t) ; tables[SMU_TABLE_WATERMARKS].align = (1 << 12); tables [SMU_TABLE_WATERMARKS].domain = 0x4; } while (0); |
479 | SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),do { tables[SMU_TABLE_SMU_METRICS].size = sizeof(SmuMetricsExternal_t ); tables[SMU_TABLE_SMU_METRICS].align = (1 << 12); tables [SMU_TABLE_SMU_METRICS].domain = 0x4; } while (0) |
480 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_SMU_METRICS].size = sizeof(SmuMetricsExternal_t ); tables[SMU_TABLE_SMU_METRICS].align = (1 << 12); tables [SMU_TABLE_SMU_METRICS].domain = 0x4; } while (0); |
481 | SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),do { tables[SMU_TABLE_I2C_COMMANDS].size = sizeof(SwI2cRequest_t ); tables[SMU_TABLE_I2C_COMMANDS].align = (1 << 12); tables [SMU_TABLE_I2C_COMMANDS].domain = 0x4; } while (0) |
482 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_I2C_COMMANDS].size = sizeof(SwI2cRequest_t ); tables[SMU_TABLE_I2C_COMMANDS].align = (1 << 12); tables [SMU_TABLE_I2C_COMMANDS].domain = 0x4; } while (0); |
483 | SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),do { tables[SMU_TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t ); tables[SMU_TABLE_OVERDRIVE].align = (1 << 12); tables [SMU_TABLE_OVERDRIVE].domain = 0x4; } while (0) |
484 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t ); tables[SMU_TABLE_OVERDRIVE].align = (1 << 12); tables [SMU_TABLE_OVERDRIVE].domain = 0x4; } while (0); |
485 | SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,do { tables[SMU_TABLE_PMSTATUSLOG].size = 0x19000; tables[SMU_TABLE_PMSTATUSLOG ].align = (1 << 12); tables[SMU_TABLE_PMSTATUSLOG].domain = 0x4; } while (0) |
486 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_PMSTATUSLOG].size = 0x19000; tables[SMU_TABLE_PMSTATUSLOG ].align = (1 << 12); tables[SMU_TABLE_PMSTATUSLOG].domain = 0x4; } while (0); |
487 | SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,do { tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffIntExternal_t ); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].align = (1 << 12); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].domain = 0x4; } while (0) |
488 | sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,do { tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffIntExternal_t ); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].align = (1 << 12); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].domain = 0x4; } while (0) |
489 | AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffIntExternal_t ); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].align = (1 << 12); tables[SMU_TABLE_ACTIVITY_MONITOR_COEFF].domain = 0x4; } while (0); |
490 | SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,do { tables[SMU_TABLE_COMBO_PPTABLE].size = 0x4000; tables[SMU_TABLE_COMBO_PPTABLE ].align = (1 << 12); tables[SMU_TABLE_COMBO_PPTABLE].domain = 0x4; } while (0) |
491 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_COMBO_PPTABLE].size = 0x4000; tables[SMU_TABLE_COMBO_PPTABLE ].align = (1 << 12); tables[SMU_TABLE_COMBO_PPTABLE].domain = 0x4; } while (0); |
492 | |
493 | smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL(0x0001 | 0x0004)); |
494 | if (!smu_table->metrics_table) |
495 | goto err0_out; |
496 | smu_table->metrics_time = 0; |
497 | |
498 | smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); |
499 | smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL(0x0001 | 0x0004)); |
500 | if (!smu_table->gpu_metrics_table) |
501 | goto err1_out; |
502 | |
503 | smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL(0x0001 | 0x0004)); |
504 | if (!smu_table->watermarks_table) |
505 | goto err2_out; |
506 | |
507 | return 0; |
508 | |
509 | err2_out: |
510 | kfree(smu_table->gpu_metrics_table); |
511 | err1_out: |
512 | kfree(smu_table->metrics_table); |
513 | err0_out: |
514 | return -ENOMEM12; |
515 | } |
516 | |
517 | static int smu_v13_0_7_allocate_dpm_context(struct smu_context *smu) |
518 | { |
519 | struct smu_dpm_context *smu_dpm = &smu->smu_dpm; |
520 | |
521 | smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), |
522 | GFP_KERNEL(0x0001 | 0x0004)); |
523 | if (!smu_dpm->dpm_context) |
524 | return -ENOMEM12; |
525 | |
526 | smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); |
527 | |
528 | return 0; |
529 | } |
530 | |
/*
 * Initialize all SMC-side tables: the v13.0.7-specific driver tables, the
 * DPM context, then the common v13.0 SMC tables.  Stops at the first
 * failure and propagates its error code.
 */
static int smu_v13_0_7_init_smc_tables(struct smu_context *smu)
{
	int ret;

	ret = smu_v13_0_7_tables_init(smu);
	if (!ret)
		ret = smu_v13_0_7_allocate_dpm_context(smu);
	if (!ret)
		ret = smu_v13_0_init_smc_tables(smu);

	return ret;
}
545 | |
546 | static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) |
547 | { |
548 | struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; |
549 | PPTable_t *driver_ppt = smu->smu_table.driver_pptable; |
550 | SkuTable_t *skutable = &driver_ppt->SkuTable; |
551 | struct smu_13_0_dpm_table *dpm_table; |
552 | struct smu_13_0_pcie_table *pcie_table; |
553 | uint32_t link_level; |
554 | int ret = 0; |
555 | |
556 | /* socclk dpm table setup */ |
557 | dpm_table = &dpm_context->dpm_tables.soc_table; |
558 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { |
559 | ret = smu_v13_0_set_single_dpm_table(smu, |
560 | SMU_SOCCLK, |
561 | dpm_table); |
562 | if (ret) |
563 | return ret; |
564 | } else { |
565 | dpm_table->count = 1; |
566 | dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; |
567 | dpm_table->dpm_levels[0].enabled = true1; |
568 | dpm_table->min = dpm_table->dpm_levels[0].value; |
569 | dpm_table->max = dpm_table->dpm_levels[0].value; |
570 | } |
571 | |
572 | /* gfxclk dpm table setup */ |
573 | dpm_table = &dpm_context->dpm_tables.gfx_table; |
574 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { |
575 | ret = smu_v13_0_set_single_dpm_table(smu, |
576 | SMU_GFXCLK, |
577 | dpm_table); |
578 | if (ret) |
579 | return ret; |
580 | |
581 | if (skutable->DriverReportedClocks.GameClockAc && |
582 | (dpm_table->dpm_levels[dpm_table->count - 1].value > |
583 | skutable->DriverReportedClocks.GameClockAc)) { |
584 | dpm_table->dpm_levels[dpm_table->count - 1].value = |
585 | skutable->DriverReportedClocks.GameClockAc; |
586 | dpm_table->max = skutable->DriverReportedClocks.GameClockAc; |
587 | } |
588 | } else { |
589 | dpm_table->count = 1; |
590 | dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; |
591 | dpm_table->dpm_levels[0].enabled = true1; |
592 | dpm_table->min = dpm_table->dpm_levels[0].value; |
593 | dpm_table->max = dpm_table->dpm_levels[0].value; |
594 | } |
595 | |
596 | /* uclk dpm table setup */ |
597 | dpm_table = &dpm_context->dpm_tables.uclk_table; |
598 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { |
599 | ret = smu_v13_0_set_single_dpm_table(smu, |
600 | SMU_UCLK, |
601 | dpm_table); |
602 | if (ret) |
603 | return ret; |
604 | } else { |
605 | dpm_table->count = 1; |
606 | dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; |
607 | dpm_table->dpm_levels[0].enabled = true1; |
608 | dpm_table->min = dpm_table->dpm_levels[0].value; |
609 | dpm_table->max = dpm_table->dpm_levels[0].value; |
610 | } |
611 | |
612 | /* fclk dpm table setup */ |
613 | dpm_table = &dpm_context->dpm_tables.fclk_table; |
614 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { |
615 | ret = smu_v13_0_set_single_dpm_table(smu, |
616 | SMU_FCLK, |
617 | dpm_table); |
618 | if (ret) |
619 | return ret; |
620 | } else { |
621 | dpm_table->count = 1; |
622 | dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; |
623 | dpm_table->dpm_levels[0].enabled = true1; |
624 | dpm_table->min = dpm_table->dpm_levels[0].value; |
625 | dpm_table->max = dpm_table->dpm_levels[0].value; |
626 | } |
627 | |
628 | /* vclk dpm table setup */ |
629 | dpm_table = &dpm_context->dpm_tables.vclk_table; |
630 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) { |
631 | ret = smu_v13_0_set_single_dpm_table(smu, |
632 | SMU_VCLK, |
633 | dpm_table); |
634 | if (ret) |
635 | return ret; |
636 | } else { |
637 | dpm_table->count = 1; |
638 | dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; |
639 | dpm_table->dpm_levels[0].enabled = true1; |
640 | dpm_table->min = dpm_table->dpm_levels[0].value; |
641 | dpm_table->max = dpm_table->dpm_levels[0].value; |
642 | } |
643 | |
644 | /* dclk dpm table setup */ |
645 | dpm_table = &dpm_context->dpm_tables.dclk_table; |
646 | if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) { |
647 | ret = smu_v13_0_set_single_dpm_table(smu, |
648 | SMU_DCLK, |
649 | dpm_table); |
650 | if (ret) |
651 | return ret; |
652 | } else { |
653 | dpm_table->count = 1; |
654 | dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; |
655 | dpm_table->dpm_levels[0].enabled = true1; |
656 | dpm_table->min = dpm_table->dpm_levels[0].value; |
657 | dpm_table->max = dpm_table->dpm_levels[0].value; |
658 | } |
659 | |
660 | /* lclk dpm table setup */ |
661 | pcie_table = &dpm_context->dpm_tables.pcie_table; |
662 | pcie_table->num_of_link_levels = 0; |
663 | for (link_level = 0; link_level < NUM_LINK_LEVELS3; link_level++) { |
664 | if (!skutable->PcieGenSpeed[link_level] && |
665 | !skutable->PcieLaneCount[link_level] && |
666 | !skutable->LclkFreq[link_level]) |
667 | continue; |
668 | |
669 | pcie_table->pcie_gen[pcie_table->num_of_link_levels] = |
670 | skutable->PcieGenSpeed[link_level]; |
671 | pcie_table->pcie_lane[pcie_table->num_of_link_levels] = |
672 | skutable->PcieLaneCount[link_level]; |
673 | pcie_table->clk_freq[pcie_table->num_of_link_levels] = |
674 | skutable->LclkFreq[link_level]; |
675 | pcie_table->num_of_link_levels++; |
676 | } |
677 | |
678 | return 0; |
679 | } |
680 | |
681 | static bool_Bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) |
682 | { |
683 | int ret = 0; |
684 | uint64_t feature_enabled; |
685 | |
686 | ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); |
687 | if (ret) |
688 | return false0; |
689 | |
690 | return !!(feature_enabled & SMC_DPM_FEATURE( (1ULL << 1) | (1ULL << 3) | (1ULL << 7) | (1ULL << 5) | (1ULL << 4) | (1ULL << 6))); |
691 | } |
692 | |
693 | static void smu_v13_0_7_dump_pptable(struct smu_context *smu) |
694 | { |
695 | struct smu_table_context *table_context = &smu->smu_table; |
696 | PPTable_t *pptable = table_context->driver_pptable; |
697 | SkuTable_t *skutable = &pptable->SkuTable; |
Value stored to 'skutable' during its initialization is never read | |
698 | |
699 | dev_info(smu->adev->dev, "Dumped PPTable:\n")do { } while(0); |
700 | |
701 | dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version)do { } while(0); |
702 | dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0])do { } while(0); |
703 | dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1])do { } while(0); |
704 | } |
705 | |
706 | static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics) |
707 | { |
708 | uint32_t throttler_status = 0; |
709 | int i; |
710 | |
711 | for (i = 0; i < THROTTLER_COUNT22; i++) |
712 | throttler_status |= |
713 | (metrics->ThrottlingPercentage[i] ? 1U << i : 0); |
714 | |
715 | return throttler_status; |
716 | } |
717 | |
/* Activity (%) below which the post-deep-sleep frequency is reported. */
#define SMU_13_0_7_BUSY_THRESHOLD15 15
/*
 * Fetch one metric identified by @member from the cached SMU metrics table
 * and store it in *@value.  Unknown members yield UINT_MAX (with a 0
 * return).  Returns a negative error code only if the metrics table
 * itself cannot be refreshed.
 */
static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table= &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	/* Refresh the cached table; NULL dest means update in place. */
	ret = smu_cmn_get_metrics_table(smu,
					NULL((void *)0),
					false0);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_VCLK1:
		*value = metrics->CurrClock[PPCLK_VCLK_1];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_DCLK1:
		*value = metrics->CurrClock[PPCLK_DCLK_1];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		/* Low memory activity: report the post-deep-sleep frequency. */
		if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD15)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD15)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* << 8: presumably converts W to a 1/256 W fixed-point unit
		 * expected by callers — TODO confirm. */
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* Temperatures scaled from degrees C to millidegrees. */
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v13_0_7_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		/* Unsupported member: sentinel value, not an error. */
		*value = UINT_MAX0xffffffffU;
		break;
	}

	return ret;
}
840 | |
841 | static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu, |
842 | enum smu_clk_type clk_type, |
843 | uint32_t *min, |
844 | uint32_t *max) |
845 | { |
846 | struct smu_13_0_dpm_context *dpm_context = |
847 | smu->smu_dpm.dpm_context; |
848 | struct smu_13_0_dpm_table *dpm_table; |
849 | |
850 | switch (clk_type) { |
851 | case SMU_MCLK: |
852 | case SMU_UCLK: |
853 | /* uclk dpm table */ |
854 | dpm_table = &dpm_context->dpm_tables.uclk_table; |
855 | break; |
856 | case SMU_GFXCLK: |
857 | case SMU_SCLK: |
858 | /* gfxclk dpm table */ |
859 | dpm_table = &dpm_context->dpm_tables.gfx_table; |
860 | break; |
861 | case SMU_SOCCLK: |
862 | /* socclk dpm table */ |
863 | dpm_table = &dpm_context->dpm_tables.soc_table; |
864 | break; |
865 | case SMU_FCLK: |
866 | /* fclk dpm table */ |
867 | dpm_table = &dpm_context->dpm_tables.fclk_table; |
868 | break; |
869 | case SMU_VCLK: |
870 | case SMU_VCLK1: |
871 | /* vclk dpm table */ |
872 | dpm_table = &dpm_context->dpm_tables.vclk_table; |
873 | break; |
874 | case SMU_DCLK: |
875 | case SMU_DCLK1: |
876 | /* dclk dpm table */ |
877 | dpm_table = &dpm_context->dpm_tables.dclk_table; |
878 | break; |
879 | default: |
880 | dev_err(smu->adev->dev, "Unsupported clock type!\n")printf("drm:pid%d:%s *ERROR* " "Unsupported clock type!\n", ( {struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
881 | return -EINVAL22; |
882 | } |
883 | |
884 | if (min) |
885 | *min = dpm_table->min; |
886 | if (max) |
887 | *max = dpm_table->max; |
888 | |
889 | return 0; |
890 | } |
891 | |
/*
 * Service AMD powerplay sensor queries by translating them into SMU
 * metrics-table reads (or a PPTable lookup for the max fan RPM).
 *
 * @data/@size: output value and its reported byte size.  *size is set to
 * 4 for every supported sensor, including the 16-bit FanMaximumRpm case —
 * NOTE(review): presumably callers expect a 4-byte slot; confirm.
 *
 * Returns 0 on success, -EOPNOTSUPP for unknown sensors, or the error
 * from the underlying metrics read.
 */
static int smu_v13_0_7_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		/* Static limit from the PPTable, not a live metric. */
		*(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_MEMACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_MEM,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_CURR_UCLK,
						       (uint32_t *)data);
		/* *= 100: convert MHz to the 10 kHz units callers expect —
		 * TODO confirm unit convention. */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP45;
		break;
	}

	return ret;
}
969 | |
970 | static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context *smu, |
971 | enum smu_clk_type clk_type, |
972 | uint32_t *value) |
973 | { |
974 | MetricsMember_t member_type; |
975 | int clk_id = 0; |
976 | |
977 | clk_id = smu_cmn_to_asic_specific_index(smu, |
978 | CMN2ASIC_MAPPING_CLK, |
979 | clk_type); |
980 | if (clk_id < 0) |
981 | return -EINVAL22; |
982 | |
983 | switch (clk_id) { |
984 | case PPCLK_GFXCLK: |
985 | member_type = METRICS_AVERAGE_GFXCLK; |
986 | break; |
987 | case PPCLK_UCLK: |
988 | member_type = METRICS_CURR_UCLK; |
989 | break; |
990 | case PPCLK_FCLK: |
991 | member_type = METRICS_CURR_FCLK; |
992 | break; |
993 | case PPCLK_SOCCLK: |
994 | member_type = METRICS_CURR_SOCCLK; |
995 | break; |
996 | case PPCLK_VCLK_0: |
997 | member_type = METRICS_CURR_VCLK; |
998 | break; |
999 | case PPCLK_DCLK_0: |
1000 | member_type = METRICS_CURR_DCLK; |
1001 | break; |
1002 | case PPCLK_VCLK_1: |
1003 | member_type = METRICS_CURR_VCLK1; |
1004 | break; |
1005 | case PPCLK_DCLK_1: |
1006 | member_type = METRICS_CURR_DCLK1; |
1007 | break; |
1008 | default: |
1009 | return -EINVAL22; |
1010 | } |
1011 | |
1012 | return smu_v13_0_7_get_smu_metrics_data(smu, |
1013 | member_type, |
1014 | value); |
1015 | } |
1016 | |
/*
 * Emit the sysfs listing of DPM levels for @clk_type into @buf, marking
 * the currently active level with '*'.  For fine-grained DPM domains a
 * third, synthetic level is shown when the current frequency sits between
 * the two real levels (see comment below).  SMU_PCIE prints the link
 * gen/width/lclk table instead.
 *
 * Returns the number of bytes written, or a negative error code if the
 * current frequency / PCIe metrics cannot be read.
 */
static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table;
	struct smu_13_0_pcie_table *pcie_table;
	uint32_t gen_speed, lane_width;
	int i, curr_freq, size = 0;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* After a fatal RAS interrupt the SMU state is unreliable. */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	/* First switch: pick the table.  Clock types not listed here fall
	 * through with single_dpm_table unused (SMU_PCIE / default). */
	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	/* Second switch: format the output. */
	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		ret = smu_v13_0_7_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!")printf("drm:pid%d:%s *ERROR* " "Failed to get current clock freq!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__);
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			     (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						single_dpm_table->dpm_levels[0].value,
						single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						single_dpm_table->dpm_levels[1].value,
						single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						i, single_dpm_table->dpm_levels[i].value,
						single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		/* Active entry = the one matching both current gen and lane count. */
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(pcie_table->pcie_lane[i] == 1) ? "x1" :
					(pcie_table->pcie_lane[i] == 2) ? "x2" :
					(pcie_table->pcie_lane[i] == 3) ? "x4" :
					(pcie_table->pcie_lane[i] == 4) ? "x8" :
					(pcie_table->pcie_lane[i] == 5) ? "x12" :
					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
					pcie_table->clk_freq[i],
					(gen_speed == pcie_table->pcie_gen[i]) &&
					(lane_width == pcie_table->pcie_lane[i]) ?
					"*" : "");
		break;

	default:
		break;
	}

	return size;
}
1149 | |
/*
 * Restrict the DPM range of @clk_type to the levels selected in @mask:
 * the lowest and highest set bits become the soft min/max level indices,
 * whose frequencies are then programmed via
 * smu_v13_0_set_soft_freq_limited_range().
 *
 * Fine-grained tables only expose two levels, so the indices are clamped
 * to {0, 1}; otherwise out-of-range indices return -EINVAL.
 * SMU_DCEFCLK/SMU_PCIE (and unknown types) are silently accepted as
 * no-ops, returning 0.
 */
static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_13_0_dpm_table *single_dpm_table;
	uint32_t soft_min_level, soft_max_level;
	uint32_t min_freq, max_freq;
	int ret = 0;

	/* ffs/fls are 1-based; an empty mask means level 0. */
	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
	case SMU_UCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		if (single_dpm_table->is_fine_grained) {
			/* There is only 2 levels for fine grained DPM */
			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
		} else {
			if ((soft_max_level >= single_dpm_table->count) ||
			    (soft_min_level >= single_dpm_table->count))
				return -EINVAL22;
		}

		min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
		max_freq = single_dpm_table->dpm_levels[soft_max_level].value;

		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    clk_type,
							    min_freq,
							    max_freq);
		break;
	case SMU_DCEFCLK:
	case SMU_PCIE:
	default:
		break;
	}

	return ret;
}
1228 | |
/*
 * Baseline thermal ranges, in millidegrees Celsius (the driver multiplies
 * SMU temperatures by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES == 1000):
 * entry [0] is the default copied into the returned range (min = absolute
 * zero, maxes = 99 C) before being overridden from the PPTable; entry [1]
 * appears to hold 120 C caps — usage not visible here, TODO confirm.
 */
static const struct smu_temperature_range smu13_thermal_policy[] =
{
	{-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
1234 | |
/*
 * Fill *@range with the board's thermal limits: start from the default
 * smu13_thermal_policy[0], then override the edge/hotspot/mem maxima from
 * the PPTable (CTF_OFFSET_* added for the emergency thresholds) and the
 * software shutdown temperature from the powerplay table.  All PPTable
 * temperatures are converted from degrees C to millidegrees.
 *
 * Returns 0, or -EINVAL if @range is NULL.
 */
static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu,
						     struct smu_temperature_range *range)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_7_powerplay_table *powerplay_table =
		table_context->power_play_table;
	PPTable_t *pptable = smu->smu_table.driver_pptable;

	if (!range)
		return -EINVAL22;

	memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range))__builtin_memcpy((range), (&smu13_thermal_policy[0]), (sizeof (struct smu_temperature_range)));

	range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
	range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE5) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
	range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
	range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT5) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
	range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
	range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM5)*
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
	range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;

	return 0;
}
1265 | |
1266 | #ifndef MAX |
1267 | #define MAX(a, b)(((a)>(b))?(a):(b)) ((a) > (b) ? (a) : (b)) |
1268 | #endif |
/*
 * Populate the cached gpu_metrics_v1_3 table from a fresh SMU metrics
 * snapshot and return it via *@table.  Frequencies switch between the
 * pre- and post-deep-sleep averages depending on whether the domain's
 * activity is above SMU_13_0_7_BUSY_THRESHOLD; dual-instance sensors
 * (VR-mem temperature, VCN activity) report the max of both instances.
 *
 * Returns sizeof(struct gpu_metrics_v1_3) on success, or a negative error
 * code if the metrics snapshot cannot be read.
 */
static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetricsExternal_t metrics_ext;
	SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
	int ret = 0;

	/* Third argument true: bypass the cache and fetch a fresh snapshot. */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics_ext,
					true1);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
	gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
	gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
	gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
	gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
	gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0],(((metrics->AvgTemperature[TEMP_VR_MEM0])>(metrics-> AvgTemperature[TEMP_VR_MEM1]))?(metrics->AvgTemperature[TEMP_VR_MEM0 ]):(metrics->AvgTemperature[TEMP_VR_MEM1]))
					     metrics->AvgTemperature[TEMP_VR_MEM1])(((metrics->AvgTemperature[TEMP_VR_MEM0])>(metrics-> AvgTemperature[TEMP_VR_MEM1]))?(metrics->AvgTemperature[TEMP_VR_MEM0 ]):(metrics->AvgTemperature[TEMP_VR_MEM1]));

	gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
	gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage,(((metrics->Vcn0ActivityPercentage)>(metrics->Vcn1ActivityPercentage ))?(metrics->Vcn0ActivityPercentage):(metrics->Vcn1ActivityPercentage ))
					       metrics->Vcn1ActivityPercentage)(((metrics->Vcn0ActivityPercentage)>(metrics->Vcn1ActivityPercentage ))?(metrics->Vcn0ActivityPercentage):(metrics->Vcn1ActivityPercentage ));

	gpu_metrics->average_socket_power = metrics->AverageSocketPower;
	gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;

	/* Low activity: report the post-deep-sleep (downclocked) average. */
	if (metrics->AverageGfxActivity <= SMU_13_0_7_BUSY_THRESHOLD15)
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
	else
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;

	if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD15)
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
	else
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;

	gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
	gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;

	gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
	gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
	gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];

	gpu_metrics->throttle_status =
			smu_v13_0_7_get_throttler_status(metrics);
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
							   smu_v13_0_7_throttler_map);

	gpu_metrics->current_fan_speed = metrics->AvgFanRpm;

	gpu_metrics->pcie_link_width = metrics->PcieWidth;
	gpu_metrics->pcie_link_speed = metrics->PcieRate;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX];
	gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC];
	gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP];

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
1345 | |
1346 | static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu) |
1347 | { |
1348 | struct smu_13_0_dpm_context *dpm_context = |
1349 | smu->smu_dpm.dpm_context; |
1350 | struct smu_13_0_dpm_table *gfx_table = |
1351 | &dpm_context->dpm_tables.gfx_table; |
1352 | struct smu_13_0_dpm_table *mem_table = |
1353 | &dpm_context->dpm_tables.uclk_table; |
1354 | struct smu_13_0_dpm_table *soc_table = |
1355 | &dpm_context->dpm_tables.soc_table; |
1356 | struct smu_13_0_dpm_table *vclk_table = |
1357 | &dpm_context->dpm_tables.vclk_table; |
1358 | struct smu_13_0_dpm_table *dclk_table = |
1359 | &dpm_context->dpm_tables.dclk_table; |
1360 | struct smu_13_0_dpm_table *fclk_table = |
1361 | &dpm_context->dpm_tables.fclk_table; |
1362 | struct smu_umd_pstate_table *pstate_table = |
1363 | &smu->pstate_table; |
1364 | struct smu_table_context *table_context = &smu->smu_table; |
1365 | PPTable_t *pptable = table_context->driver_pptable; |
1366 | DriverReportedClocks_t driver_clocks = |
1367 | pptable->SkuTable.DriverReportedClocks; |
1368 | |
1369 | pstate_table->gfxclk_pstate.min = gfx_table->min; |
1370 | if (driver_clocks.GameClockAc && |
1371 | (driver_clocks.GameClockAc < gfx_table->max)) |
1372 | pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc; |
1373 | else |
1374 | pstate_table->gfxclk_pstate.peak = gfx_table->max; |
1375 | |
1376 | pstate_table->uclk_pstate.min = mem_table->min; |
1377 | pstate_table->uclk_pstate.peak = mem_table->max; |
1378 | |
1379 | pstate_table->socclk_pstate.min = soc_table->min; |
1380 | pstate_table->socclk_pstate.peak = soc_table->max; |
1381 | |
1382 | pstate_table->vclk_pstate.min = vclk_table->min; |
1383 | pstate_table->vclk_pstate.peak = vclk_table->max; |
1384 | |
1385 | pstate_table->dclk_pstate.min = dclk_table->min; |
1386 | pstate_table->dclk_pstate.peak = dclk_table->max; |
1387 | |
1388 | pstate_table->fclk_pstate.min = fclk_table->min; |
1389 | pstate_table->fclk_pstate.peak = fclk_table->max; |
1390 | |
1391 | if (driver_clocks.BaseClockAc && |
1392 | driver_clocks.BaseClockAc < gfx_table->max) |
1393 | pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc; |
1394 | else |
1395 | pstate_table->gfxclk_pstate.standard = gfx_table->max; |
1396 | pstate_table->uclk_pstate.standard = mem_table->max; |
1397 | pstate_table->socclk_pstate.standard = soc_table->min; |
1398 | pstate_table->vclk_pstate.standard = vclk_table->min; |
1399 | pstate_table->dclk_pstate.standard = dclk_table->min; |
1400 | pstate_table->fclk_pstate.standard = fclk_table->min; |
1401 | |
1402 | return 0; |
1403 | } |
1404 | |
1405 | static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu, |
1406 | uint32_t *speed) |
1407 | { |
1408 | int ret; |
1409 | |
1410 | if (!speed) |
1411 | return -EINVAL22; |
1412 | |
1413 | ret = smu_v13_0_7_get_smu_metrics_data(smu, |
1414 | METRICS_CURR_FANPWM, |
1415 | speed); |
1416 | if (ret) { |
1417 | dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!")printf("drm:pid%d:%s *ERROR* " "Failed to get fan speed(PWM)!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__); |
1418 | return ret; |
1419 | } |
1420 | |
1421 | /* Convert the PMFW output which is in percent to pwm(255) based */ |
1422 | *speed = MIN(*speed * 255 / 100, 255)(((*speed * 255 / 100)<(255))?(*speed * 255 / 100):(255)); |
1423 | |
1424 | return 0; |
1425 | } |
1426 | |
1427 | static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu, |
1428 | uint32_t *speed) |
1429 | { |
1430 | if (!speed) |
1431 | return -EINVAL22; |
1432 | |
1433 | return smu_v13_0_7_get_smu_metrics_data(smu, |
1434 | METRICS_CURR_FANSPEED, |
1435 | speed); |
1436 | } |
1437 | |
1438 | static int smu_v13_0_7_enable_mgpu_fan_boost(struct smu_context *smu) |
1439 | { |
1440 | struct smu_table_context *table_context = &smu->smu_table; |
1441 | PPTable_t *pptable = table_context->driver_pptable; |
1442 | SkuTable_t *skutable = &pptable->SkuTable; |
1443 | |
1444 | /* |
1445 | * Skip the MGpuFanBoost setting for those ASICs |
1446 | * which do not support it |
1447 | */ |
1448 | if (skutable->MGpuAcousticLimitRpmThreshold == 0) |
1449 | return 0; |
1450 | |
1451 | return smu_cmn_send_smc_msg_with_param(smu, |
1452 | SMU_MSG_SetMGpuFanBoostLimitRpm, |
1453 | 0, |
1454 | NULL((void *)0)); |
1455 | } |
1456 | |
1457 | static int smu_v13_0_7_get_power_limit(struct smu_context *smu, |
1458 | uint32_t *current_power_limit, |
1459 | uint32_t *default_power_limit, |
1460 | uint32_t *max_power_limit) |
1461 | { |
1462 | struct smu_table_context *table_context = &smu->smu_table; |
1463 | struct smu_13_0_7_powerplay_table *powerplay_table = |
1464 | (struct smu_13_0_7_powerplay_table *)table_context->power_play_table; |
1465 | PPTable_t *pptable = table_context->driver_pptable; |
1466 | SkuTable_t *skutable = &pptable->SkuTable; |
1467 | uint32_t power_limit, od_percent; |
1468 | |
1469 | if (smu_v13_0_get_current_power_limit(smu, &power_limit)) |
1470 | power_limit = smu->adev->pm.ac_power ? |
1471 | skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] : |
1472 | skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0]; |
1473 | |
1474 | if (current_power_limit) |
1475 | *current_power_limit = power_limit; |
1476 | if (default_power_limit) |
1477 | *default_power_limit = power_limit; |
1478 | |
1479 | if (max_power_limit) { |
1480 | if (smu->od_enabled) { |
1481 | od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE])((__uint32_t)(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE ])); |
1482 | |
1483 | dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit)do { } while(0); |
1484 | |
1485 | power_limit *= (100 + od_percent); |
1486 | power_limit /= 100; |
1487 | } |
1488 | *max_power_limit = power_limit; |
1489 | } |
1490 | |
1491 | return 0; |
1492 | } |
1493 | |
1494 | static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf) |
1495 | { |
1496 | DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external; |
1497 | uint32_t i, j, size = 0; |
1498 | int16_t workload_type = 0; |
1499 | int result = 0; |
1500 | |
1501 | if (!buf) |
1502 | return -EINVAL22; |
1503 | |
1504 | activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT, |
1505 | sizeof(*activity_monitor_external), |
1506 | GFP_KERNEL(0x0001 | 0x0004)); |
1507 | if (!activity_monitor_external) |
1508 | return -ENOMEM12; |
1509 | |
1510 | size += sysfs_emit_at(buf, size, " "); |
1511 | for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) |
1512 | size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i], |
1513 | (i == smu->power_profile_mode) ? "* " : " "); |
1514 | |
1515 | size += sysfs_emit_at(buf, size, "\n"); |
1516 | |
1517 | for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) { |
1518 | /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ |
1519 | workload_type = smu_cmn_to_asic_specific_index(smu, |
1520 | CMN2ASIC_MAPPING_WORKLOAD, |
1521 | i); |
1522 | if (workload_type < 0) { |
1523 | result = -EINVAL22; |
1524 | goto out; |
1525 | } |
1526 | |
1527 | result = smu_cmn_update_table(smu, |
1528 | SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type, |
1529 | (void *)(&activity_monitor_external[i]), false0); |
1530 | if (result) { |
1531 | dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__)printf("drm:pid%d:%s *ERROR* " "[%s] Failed to get activity monitor!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__ ); |
1532 | goto out; |
1533 | } |
1534 | } |
1535 | |
1536 | #define PRINT_DPM_MONITOR(field) \ |
1537 | do { \ |
1538 | size += sysfs_emit_at(buf, size, "%-30s", #field); \ |
1539 | for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \ |
1540 | size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \ |
1541 | size += sysfs_emit_at(buf, size, "\n"); \ |
1542 | } while (0) |
1543 | |
1544 | PRINT_DPM_MONITOR(Gfx_ActiveHystLimit); |
1545 | PRINT_DPM_MONITOR(Gfx_IdleHystLimit); |
1546 | PRINT_DPM_MONITOR(Gfx_FPS); |
1547 | PRINT_DPM_MONITOR(Gfx_MinActiveFreqType); |
1548 | PRINT_DPM_MONITOR(Gfx_BoosterFreqType); |
1549 | PRINT_DPM_MONITOR(Gfx_MinActiveFreq); |
1550 | PRINT_DPM_MONITOR(Gfx_BoosterFreq); |
1551 | PRINT_DPM_MONITOR(Fclk_ActiveHystLimit); |
1552 | PRINT_DPM_MONITOR(Fclk_IdleHystLimit); |
1553 | PRINT_DPM_MONITOR(Fclk_FPS); |
1554 | PRINT_DPM_MONITOR(Fclk_MinActiveFreqType); |
1555 | PRINT_DPM_MONITOR(Fclk_BoosterFreqType); |
1556 | PRINT_DPM_MONITOR(Fclk_MinActiveFreq); |
1557 | PRINT_DPM_MONITOR(Fclk_BoosterFreq); |
1558 | #undef PRINT_DPM_MONITOR |
1559 | |
1560 | result = size; |
1561 | out: |
1562 | kfree(activity_monitor_external); |
1563 | return result; |
1564 | } |
1565 | |
1566 | static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) |
1567 | { |
1568 | |
1569 | DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; |
1570 | DpmActivityMonitorCoeffInt_t *activity_monitor = |
1571 | &(activity_monitor_external.DpmActivityMonitorCoeffInt); |
1572 | int workload_type, ret = 0; |
1573 | |
1574 | smu->power_profile_mode = input[size]; |
1575 | |
1576 | if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { |
1577 | dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode)printf("drm:pid%d:%s *ERROR* " "Invalid power profile mode %d\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , smu-> power_profile_mode); |
1578 | return -EINVAL22; |
1579 | } |
1580 | |
1581 | if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { |
1582 | |
1583 | ret = smu_cmn_update_table(smu, |
1584 | SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT6, |
1585 | (void *)(&activity_monitor_external), false0); |
1586 | if (ret) { |
1587 | dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__)printf("drm:pid%d:%s *ERROR* " "[%s] Failed to get activity monitor!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__ ); |
1588 | return ret; |
1589 | } |
1590 | |
1591 | switch (input[0]) { |
1592 | case 0: /* Gfxclk */ |
1593 | activity_monitor->Gfx_ActiveHystLimit = input[1]; |
1594 | activity_monitor->Gfx_IdleHystLimit = input[2]; |
1595 | activity_monitor->Gfx_FPS = input[3]; |
1596 | activity_monitor->Gfx_MinActiveFreqType = input[4]; |
1597 | activity_monitor->Gfx_BoosterFreqType = input[5]; |
1598 | activity_monitor->Gfx_MinActiveFreq = input[6]; |
1599 | activity_monitor->Gfx_BoosterFreq = input[7]; |
1600 | break; |
1601 | case 1: /* Fclk */ |
1602 | activity_monitor->Fclk_ActiveHystLimit = input[1]; |
1603 | activity_monitor->Fclk_IdleHystLimit = input[2]; |
1604 | activity_monitor->Fclk_FPS = input[3]; |
1605 | activity_monitor->Fclk_MinActiveFreqType = input[4]; |
1606 | activity_monitor->Fclk_BoosterFreqType = input[5]; |
1607 | activity_monitor->Fclk_MinActiveFreq = input[6]; |
1608 | activity_monitor->Fclk_BoosterFreq = input[7]; |
1609 | break; |
1610 | } |
1611 | |
1612 | ret = smu_cmn_update_table(smu, |
1613 | SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT6, |
1614 | (void *)(&activity_monitor_external), true1); |
1615 | if (ret) { |
1616 | dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__)printf("drm:pid%d:%s *ERROR* " "[%s] Failed to set activity monitor!" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__ ); |
1617 | return ret; |
1618 | } |
1619 | } |
1620 | |
1621 | /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ |
1622 | workload_type = smu_cmn_to_asic_specific_index(smu, |
1623 | CMN2ASIC_MAPPING_WORKLOAD, |
1624 | smu->power_profile_mode); |
1625 | if (workload_type < 0) |
1626 | return -EINVAL22; |
1627 | smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, |
1628 | 1 << workload_type, NULL((void *)0)); |
1629 | |
1630 | return ret; |
1631 | } |
1632 | |
1633 | static int smu_v13_0_7_set_mp1_state(struct smu_context *smu, |
1634 | enum pp_mp1_state mp1_state) |
1635 | { |
1636 | int ret; |
1637 | |
1638 | switch (mp1_state) { |
1639 | case PP_MP1_STATE_UNLOAD: |
1640 | ret = smu_cmn_set_mp1_state(smu, mp1_state); |
1641 | break; |
1642 | default: |
1643 | /* Ignore others */ |
1644 | ret = 0; |
1645 | } |
1646 | |
1647 | return ret; |
1648 | } |
1649 | |
1650 | static int smu_v13_0_7_baco_enter(struct smu_context *smu) |
1651 | { |
1652 | struct smu_baco_context *smu_baco = &smu->smu_baco; |
1653 | struct amdgpu_device *adev = smu->adev; |
1654 | |
1655 | if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) |
1656 | return smu_v13_0_baco_set_armd3_sequence(smu, |
1657 | smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); |
1658 | else |
1659 | return smu_v13_0_baco_enter(smu); |
1660 | } |
1661 | |
1662 | static int smu_v13_0_7_baco_exit(struct smu_context *smu) |
1663 | { |
1664 | struct amdgpu_device *adev = smu->adev; |
1665 | |
1666 | if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { |
1667 | /* Wait for PMFW handling for the Dstate change */ |
1668 | usleep_range(10000, 11000); |
1669 | return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); |
1670 | } else { |
1671 | return smu_v13_0_baco_exit(smu); |
1672 | } |
1673 | } |
1674 | |
1675 | static bool_Bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) |
1676 | { |
1677 | struct amdgpu_device *adev = smu->adev; |
1678 | |
1679 | /* SRIOV does not support SMU mode1 reset */ |
1680 | if (amdgpu_sriov_vf(adev)((adev)->virt.caps & (1 << 2))) |
1681 | return false0; |
1682 | |
1683 | return true1; |
1684 | } |
1685 | |
1686 | static int smu_v13_0_7_set_df_cstate(struct smu_context *smu, |
1687 | enum pp_df_cstate state) |
1688 | { |
1689 | return smu_cmn_send_smc_msg_with_param(smu, |
1690 | SMU_MSG_DFCstateControl, |
1691 | state, |
1692 | NULL((void *)0)); |
1693 | } |
1694 | |
1695 | static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { |
1696 | .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, |
1697 | .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, |
1698 | .is_dpm_running = smu_v13_0_7_is_dpm_running, |
1699 | .dump_pptable = smu_v13_0_7_dump_pptable, |
1700 | .init_microcode = smu_v13_0_init_microcode, |
1701 | .load_microcode = smu_v13_0_load_microcode, |
1702 | .fini_microcode = smu_v13_0_fini_microcode, |
1703 | .init_smc_tables = smu_v13_0_7_init_smc_tables, |
1704 | .fini_smc_tables = smu_v13_0_fini_smc_tables, |
1705 | .init_power = smu_v13_0_init_power, |
1706 | .fini_power = smu_v13_0_fini_power, |
1707 | .check_fw_status = smu_v13_0_7_check_fw_status, |
1708 | .setup_pptable = smu_v13_0_7_setup_pptable, |
1709 | .check_fw_version = smu_v13_0_check_fw_version, |
1710 | .write_pptable = smu_cmn_write_pptable, |
1711 | .set_driver_table_location = smu_v13_0_set_driver_table_location, |
1712 | .system_features_control = smu_v13_0_system_features_control, |
1713 | .set_allowed_mask = smu_v13_0_set_allowed_mask, |
1714 | .get_enabled_mask = smu_cmn_get_enabled_mask, |
1715 | .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable, |
1716 | .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable, |
1717 | .init_pptable_microcode = smu_v13_0_init_pptable_microcode, |
1718 | .populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk, |
1719 | .get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq, |
1720 | .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, |
1721 | .read_sensor = smu_v13_0_7_read_sensor, |
1722 | .feature_is_enabled = smu_cmn_feature_is_enabled, |
1723 | .print_clk_levels = smu_v13_0_7_print_clk_levels, |
1724 | .force_clk_levels = smu_v13_0_7_force_clk_levels, |
1725 | .update_pcie_parameters = smu_v13_0_update_pcie_parameters, |
1726 | .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range, |
1727 | .register_irq_handler = smu_v13_0_register_irq_handler, |
1728 | .enable_thermal_alert = smu_v13_0_enable_thermal_alert, |
1729 | .disable_thermal_alert = smu_v13_0_disable_thermal_alert, |
1730 | .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location, |
1731 | .get_gpu_metrics = smu_v13_0_7_get_gpu_metrics, |
1732 | .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range, |
1733 | .set_performance_level = smu_v13_0_set_performance_level, |
1734 | .gfx_off_control = smu_v13_0_gfx_off_control, |
1735 | .get_fan_speed_pwm = smu_v13_0_7_get_fan_speed_pwm, |
1736 | .get_fan_speed_rpm = smu_v13_0_7_get_fan_speed_rpm, |
1737 | .set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm, |
1738 | .set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm, |
1739 | .get_fan_control_mode = smu_v13_0_get_fan_control_mode, |
1740 | .set_fan_control_mode = smu_v13_0_set_fan_control_mode, |
1741 | .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost, |
1742 | .get_power_limit = smu_v13_0_7_get_power_limit, |
1743 | .set_power_limit = smu_v13_0_set_power_limit, |
1744 | .set_power_source = smu_v13_0_set_power_source, |
1745 | .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode, |
1746 | .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode, |
1747 | .set_tool_table_location = smu_v13_0_set_tool_table_location, |
1748 | .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, |
1749 | .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, |
1750 | .baco_is_support = smu_v13_0_baco_is_support, |
1751 | .baco_get_state = smu_v13_0_baco_get_state, |
1752 | .baco_set_state = smu_v13_0_baco_set_state, |
1753 | .baco_enter = smu_v13_0_7_baco_enter, |
1754 | .baco_exit = smu_v13_0_7_baco_exit, |
1755 | .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, |
1756 | .mode1_reset = smu_v13_0_mode1_reset, |
1757 | .set_mp1_state = smu_v13_0_7_set_mp1_state, |
1758 | .set_df_cstate = smu_v13_0_7_set_df_cstate, |
1759 | .gpo_control = smu_v13_0_gpo_control, |
1760 | }; |
1761 | |
1762 | void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) |
1763 | { |
1764 | smu->ppt_funcs = &smu_v13_0_7_ppt_funcs; |
1765 | smu->message_map = smu_v13_0_7_message_map; |
1766 | smu->clock_map = smu_v13_0_7_clk_map; |
1767 | smu->feature_map = smu_v13_0_7_feature_mask_map; |
1768 | smu->table_map = smu_v13_0_7_table_map; |
1769 | smu->pwr_src_map = smu_v13_0_7_pwr_src_map; |
1770 | smu->workload_map = smu_v13_0_7_workload_map; |
1771 | smu_v13_0_set_smu_mailbox_registers(smu); |
1772 | } |