File: dev/pci/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
Warning: line 818, column 24: Value stored to 'adev' during its initialization is never read
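
Reading the diagnostic with the macro expansions in mind helps: in this port, dev_err() expands to a plain printf() of the current process id and function name and never dereferences its device argument, so the only apparent read of the local 'adev' (the dev_err() call at line 824) disappears during preprocessing, and the initialization at line 818 becomes a dead store. Below is a minimal sketch of one possible cleanup, using only identifiers from the listing; it is illustrative, not the committed fix for this file.

    static int vangogh_common_print_clk_levels(struct smu_context *smu,
                    enum smu_clk_type clk_type, char *buf)
    {
            uint32_t if_version;
            int ret;

            ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
            if (ret) {
                    /* Reference the device through smu directly: no named
                     * local is left behind for the analyzer to flag when
                     * the dev_err() expansion drops its first argument. */
                    dev_err(smu->adev->dev, "Failed to get smu if version!\n");
                    return ret;
            }

            if (if_version < 0x3)
                    return vangogh_print_legacy_clk_levels(smu, clk_type, buf);

            return vangogh_print_clk_levels(smu, clk_type, buf);
    }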
1 | /* |
2 | * Copyright 2020 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #define SWSMU_CODE_LAYER_L2 |
25 | |
26 | #include "amdgpu.h" |
27 | #include "amdgpu_smu.h" |
28 | #include "smu_v11_0.h" |
29 | #include "smu11_driver_if_vangogh.h" |
30 | #include "vangogh_ppt.h" |
31 | #include "smu_v11_5_ppsmc.h" |
32 | #include "smu_v11_5_pmfw.h" |
33 | #include "smu_cmn.h" |
34 | #include "soc15_common.h" |
35 | #include "asic_reg/gc/gc_10_3_0_offset.h" |
36 | #include "asic_reg/gc/gc_10_3_0_sh_mask.h" |
37 | #include <asm/processor.h> |
38 | |
39 | /* |
40 | * DO NOT use these for err/warn/info/debug messages. |
41 | * Use dev_err, dev_warn, dev_info and dev_dbg instead. |
42 | * They are more MGPU friendly. |
43 | */ |
44 | #undef pr_err |
45 | #undef pr_warn |
46 | #undef pr_info |
47 | #undef pr_debug |
48 | |
49 | // Registers related to GFXOFF |
50 | // addressBlock: smuio_smuio_SmuSmuioDec |
51 | // base address: 0x5a000 |
52 | #define mmSMUIO_GFX_MISC_CNTL 0x00c5 |
53 | #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 |
54 | |
55 | //SMUIO_GFX_MISC_CNTL |
56 | #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0 |
57 | #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 |
58 | #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L |
59 | #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L |
60 | |
61 | #define FEATURE_MASK(feature) (1ULL << feature) |
62 | #define SMC_DPM_FEATURE ( \ |
63 | FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ |
64 | FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ |
65 | FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ |
66 | FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ |
67 | FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ |
68 | FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ |
69 | FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ |
70 | FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \ |
71 | FEATURE_MASK(FEATURE_GFX_DPM_BIT)) |
72 | |
73 | static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = { |
74 | MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), |
75 | MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), |
76 | MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0), |
77 | MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0), |
78 | MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), |
79 | MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), |
80 | MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0), |
81 | MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0), |
82 | MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), |
83 | MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), |
84 | MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0), |
85 | MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0), |
86 | MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0), |
87 | MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0), |
88 | MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 0), |
89 | MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 0), |
90 | MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), |
91 | MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), |
92 | MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), |
93 | MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), |
94 | MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0), |
95 | MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0), |
96 | MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0), |
97 | MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0), |
98 | MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0), |
99 | MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 0), |
100 | MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 0), |
101 | MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 0), |
102 | MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), |
103 | MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 0), |
104 | MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0), |
105 | MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0), |
106 | MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0), |
107 | MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0), |
108 | MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), |
109 | MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), |
110 | MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 0), |
111 | MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 0), |
112 | MSG_MAP(PowerUpCvip, PPSMC_MSG_PowerUpCvip, 0), |
113 | MSG_MAP(PowerDownCvip, PPSMC_MSG_PowerDownCvip, 0), |
114 | MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), |
115 | MSG_MAP(GetThermalLimit, PPSMC_MSG_GetThermalLimit, 0), |
116 | MSG_MAP(GetCurrentTemperature, PPSMC_MSG_GetCurrentTemperature, 0), |
117 | MSG_MAP(GetCurrentPower, PPSMC_MSG_GetCurrentPower, 0), |
118 | MSG_MAP(GetCurrentVoltage, PPSMC_MSG_GetCurrentVoltage, 0), |
119 | MSG_MAP(GetCurrentCurrent, PPSMC_MSG_GetCurrentCurrent, 0), |
120 | MSG_MAP(GetAverageCpuActivity, PPSMC_MSG_GetAverageCpuActivity, 0), |
121 | MSG_MAP(GetAverageGfxActivity, PPSMC_MSG_GetAverageGfxActivity, 0), |
122 | MSG_MAP(GetAveragePower, PPSMC_MSG_GetAveragePower, 0), |
123 | MSG_MAP(GetAverageTemperature, PPSMC_MSG_GetAverageTemperature, 0), |
124 | MSG_MAP(SetAveragePowerTimeConstant, PPSMC_MSG_SetAveragePowerTimeConstant, 0), |
125 | MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0), |
126 | MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0), |
127 | MSG_MAP(SetMitigationEndHysteresis, PPSMC_MSG_SetMitigationEndHysteresis, 0), |
128 | MSG_MAP(GetCurrentFreq, PPSMC_MSG_GetCurrentFreq, 0), |
129 | MSG_MAP(SetReducedPptLimit, PPSMC_MSG_SetReducedPptLimit, 0), |
130 | MSG_MAP(SetReducedThermalLimit, PPSMC_MSG_SetReducedThermalLimit, 0), |
131 | MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0), |
132 | MSG_MAP(StartDramLogging, PPSMC_MSG_StartDramLogging, 0), |
133 | MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0), |
134 | MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0), |
135 | MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0), |
136 | MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0), |
137 | MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0), |
138 | MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0), |
139 | MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0), |
140 | MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0), |
141 | MSG_MAP(GetGfxOffStatus, PPSMC_MSG_GetGfxOffStatus, 0), |
142 | MSG_MAP(GetGfxOffEntryCount, PPSMC_MSG_GetGfxOffEntryCount, 0), |
143 | MSG_MAP(LogGfxOffResidency, PPSMC_MSG_LogGfxOffResidency, 0), |
144 | }; |
145 | |
146 | static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = { |
147 | FEA_MAP(PPT), |
148 | FEA_MAP(TDC), |
149 | FEA_MAP(THERMAL), |
150 | FEA_MAP(DS_GFXCLK), |
151 | FEA_MAP(DS_SOCCLK), |
152 | FEA_MAP(DS_LCLK), |
153 | FEA_MAP(DS_FCLK), |
154 | FEA_MAP(DS_MP1CLK), |
155 | FEA_MAP(DS_MP0CLK), |
156 | FEA_MAP(ATHUB_PG), |
157 | FEA_MAP(CCLK_DPM), |
158 | FEA_MAP(FAN_CONTROLLER), |
159 | FEA_MAP(ULV), |
160 | FEA_MAP(VCN_DPM), |
161 | FEA_MAP(LCLK_DPM), |
162 | FEA_MAP(SHUBCLK_DPM), |
163 | FEA_MAP(DCFCLK_DPM), |
164 | FEA_MAP(DS_DCFCLK), |
165 | FEA_MAP(S0I2), |
166 | FEA_MAP(SMU_LOW_POWER), |
167 | FEA_MAP(GFX_DEM), |
168 | FEA_MAP(PSI), |
169 | FEA_MAP(PROCHOT), |
170 | FEA_MAP(CPUOFF), |
171 | FEA_MAP(STAPM), |
172 | FEA_MAP(S0I3), |
173 | FEA_MAP(DF_CSTATES), |
174 | FEA_MAP(PERF_LIMIT), |
175 | FEA_MAP(CORE_DLDO), |
176 | FEA_MAP(RSMU_LOW_POWER), |
177 | FEA_MAP(SMN_LOW_POWER), |
178 | FEA_MAP(THM_LOW_POWER), |
179 | FEA_MAP(SMUIO_LOW_POWER), |
180 | FEA_MAP(MP1_LOW_POWER), |
181 | FEA_MAP(DS_VCN), |
182 | FEA_MAP(CPPC), |
183 | FEA_MAP(OS_CSTATES), |
184 | FEA_MAP(ISP_DPM), |
185 | FEA_MAP(A55_DPM), |
186 | FEA_MAP(CVIP_DSP_DPM), |
187 | FEA_MAP(MSMU_LOW_POWER), |
188 | FEA_MAP_REVERSE(SOCCLK), |
189 | FEA_MAP_REVERSE(FCLK), |
190 | FEA_MAP_HALF_REVERSE(GFX), |
191 | }; |
192 | |
193 | static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = { |
194 | TAB_MAP_VALID(WATERMARKS), |
195 | TAB_MAP_VALID(SMU_METRICS), |
196 | TAB_MAP_VALID(CUSTOM_DPM), |
197 | TAB_MAP_VALID(DPMCLOCKS), |
198 | }; |
199 | |
200 | static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { |
201 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), |
202 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), |
203 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), |
204 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), |
205 | WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), |
206 | }; |
207 | |
208 | static const uint8_t vangogh_throttler_map[] = { |
209 | [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT), |
210 | [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT), |
211 | [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT), |
212 | [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT), |
213 | [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT), |
214 | [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT), |
215 | [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT), |
216 | [THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT), |
217 | [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT), |
218 | [THROTTLER_STATUS_BIT_TDC_GFX] = (SMU_THROTTLER_TDC_GFX_BIT), |
219 | [THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT), |
220 | }; |
221 | |
222 | static int vangogh_tables_init(struct smu_context *smu) |
223 | { |
224 | struct smu_table_context *smu_table = &smu->smu_table; |
225 | struct smu_table *tables = smu_table->tables; |
226 | uint32_t if_version; |
227 | uint32_t smu_version; |
228 | uint32_t ret = 0; |
229 | |
230 | ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); |
231 | if (ret) { |
232 | return ret; |
233 | } |
234 | |
235 | SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), |
236 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); |
237 | SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), |
238 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); |
239 | SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, |
240 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); |
241 | SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), |
242 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); |
243 | |
244 | if (if_version < 0x3) { |
245 | SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), |
246 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); |
247 | smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); |
248 | } else { |
249 | SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), |
250 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); |
251 | smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); |
252 | } |
253 | if (!smu_table->metrics_table) |
254 | goto err0_out; |
255 | smu_table->metrics_time = 0; |
256 | |
257 | if (smu_version >= 0x043F3E00) |
258 | smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3); |
259 | else |
260 | smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); |
261 | smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); |
262 | if (!smu_table->gpu_metrics_table) |
263 | goto err1_out; |
264 | |
265 | smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); |
266 | if (!smu_table->watermarks_table) |
267 | goto err2_out; |
268 | |
269 | smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); |
270 | if (!smu_table->clocks_table) |
271 | goto err3_out; |
272 | |
273 | return 0; |
274 | |
275 | err3_out: |
276 | kfree(smu_table->watermarks_table); |
277 | err2_out: |
278 | kfree(smu_table->gpu_metrics_table); |
279 | err1_out: |
280 | kfree(smu_table->metrics_table); |
281 | err0_out: |
282 | return -ENOMEM; |
283 | } |
284 | |
285 | static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, |
286 | MetricsMember_t member, |
287 | uint32_t *value) |
288 | { |
289 | struct smu_table_context *smu_table = &smu->smu_table; |
290 | SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; |
291 | int ret = 0; |
292 | |
293 | ret = smu_cmn_get_metrics_table(smu, |
294 | NULL, |
295 | false); |
296 | if (ret) |
297 | return ret; |
298 | |
299 | switch (member) { |
300 | case METRICS_CURR_GFXCLK: |
301 | *value = metrics->GfxclkFrequency; |
302 | break; |
303 | case METRICS_AVERAGE_SOCCLK: |
304 | *value = metrics->SocclkFrequency; |
305 | break; |
306 | case METRICS_AVERAGE_VCLK: |
307 | *value = metrics->VclkFrequency; |
308 | break; |
309 | case METRICS_AVERAGE_DCLK: |
310 | *value = metrics->DclkFrequency; |
311 | break; |
312 | case METRICS_CURR_UCLK: |
313 | *value = metrics->MemclkFrequency; |
314 | break; |
315 | case METRICS_AVERAGE_GFXACTIVITY: |
316 | *value = metrics->GfxActivity / 100; |
317 | break; |
318 | case METRICS_AVERAGE_VCNACTIVITY: |
319 | *value = metrics->UvdActivity; |
320 | break; |
321 | case METRICS_AVERAGE_SOCKETPOWER: |
322 | *value = (metrics->CurrentSocketPower << 8) / |
323 | 1000; |
324 | break; |
325 | case METRICS_TEMPERATURE_EDGE: |
326 | *value = metrics->GfxTemperature / 100 * |
327 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
328 | break; |
329 | case METRICS_TEMPERATURE_HOTSPOT: |
330 | *value = metrics->SocTemperature / 100 * |
331 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
332 | break; |
333 | case METRICS_THROTTLER_STATUS: |
334 | *value = metrics->ThrottlerStatus; |
335 | break; |
336 | case METRICS_VOLTAGE_VDDGFX: |
337 | *value = metrics->Voltage[2]; |
338 | break; |
339 | case METRICS_VOLTAGE_VDDSOC: |
340 | *value = metrics->Voltage[1]; |
341 | break; |
342 | case METRICS_AVERAGE_CPUCLK: |
343 | memcpy(value, &metrics->CoreFrequency[0], |
344 | smu->cpu_core_num * sizeof(uint16_t)); |
345 | break; |
346 | default: |
347 | *value = UINT_MAX; |
348 | break; |
349 | } |
350 | |
351 | return ret; |
352 | } |
353 | |
354 | static int vangogh_get_smu_metrics_data(struct smu_context *smu, |
355 | MetricsMember_t member, |
356 | uint32_t *value) |
357 | { |
358 | struct smu_table_context *smu_table = &smu->smu_table; |
359 | SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; |
360 | int ret = 0; |
361 | |
362 | ret = smu_cmn_get_metrics_table(smu, |
363 | NULL, |
364 | false); |
365 | if (ret) |
366 | return ret; |
367 | |
368 | switch (member) { |
369 | case METRICS_CURR_GFXCLK: |
370 | *value = metrics->Current.GfxclkFrequency; |
371 | break; |
372 | case METRICS_AVERAGE_SOCCLK: |
373 | *value = metrics->Current.SocclkFrequency; |
374 | break; |
375 | case METRICS_AVERAGE_VCLK: |
376 | *value = metrics->Current.VclkFrequency; |
377 | break; |
378 | case METRICS_AVERAGE_DCLK: |
379 | *value = metrics->Current.DclkFrequency; |
380 | break; |
381 | case METRICS_CURR_UCLK: |
382 | *value = metrics->Current.MemclkFrequency; |
383 | break; |
384 | case METRICS_AVERAGE_GFXACTIVITY: |
385 | *value = metrics->Current.GfxActivity; |
386 | break; |
387 | case METRICS_AVERAGE_VCNACTIVITY: |
388 | *value = metrics->Current.UvdActivity; |
389 | break; |
390 | case METRICS_AVERAGE_SOCKETPOWER: |
391 | *value = (metrics->Current.CurrentSocketPower << 8) / |
392 | 1000; |
393 | break; |
394 | case METRICS_TEMPERATURE_EDGE: |
395 | *value = metrics->Current.GfxTemperature / 100 * |
396 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
397 | break; |
398 | case METRICS_TEMPERATURE_HOTSPOT: |
399 | *value = metrics->Current.SocTemperature / 100 * |
400 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
401 | break; |
402 | case METRICS_THROTTLER_STATUS: |
403 | *value = metrics->Current.ThrottlerStatus; |
404 | break; |
405 | case METRICS_VOLTAGE_VDDGFX: |
406 | *value = metrics->Current.Voltage[2]; |
407 | break; |
408 | case METRICS_VOLTAGE_VDDSOC: |
409 | *value = metrics->Current.Voltage[1]; |
410 | break; |
411 | case METRICS_AVERAGE_CPUCLK: |
412 | memcpy(value, &metrics->Current.CoreFrequency[0], |
413 | smu->cpu_core_num * sizeof(uint16_t)); |
414 | break; |
415 | default: |
416 | *value = UINT_MAX; |
417 | break; |
418 | } |
419 | |
420 | return ret; |
421 | } |
422 | |
423 | static int vangogh_common_get_smu_metrics_data(struct smu_context *smu, |
424 | MetricsMember_t member, |
425 | uint32_t *value) |
426 | { |
427 | struct amdgpu_device *adev = smu->adev; |
428 | uint32_t if_version; |
429 | int ret = 0; |
430 | |
431 | ret = smu_cmn_get_smc_version(smu, &if_version, NULL); |
432 | if (ret) { |
433 | dev_err(adev->dev, "Failed to get smu if version!\n"); |
434 | return ret; |
435 | } |
436 | |
437 | if (if_version < 0x3) |
438 | ret = vangogh_get_legacy_smu_metrics_data(smu, member, value); |
439 | else |
440 | ret = vangogh_get_smu_metrics_data(smu, member, value); |
441 | |
442 | return ret; |
443 | } |
444 | |
445 | static int vangogh_allocate_dpm_context(struct smu_context *smu) |
446 | { |
447 | struct smu_dpm_context *smu_dpm = &smu->smu_dpm; |
448 | |
449 | smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context), |
450 | GFP_KERNEL); |
451 | if (!smu_dpm->dpm_context) |
452 | return -ENOMEM; |
453 | |
454 | smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context); |
455 | |
456 | return 0; |
457 | } |
458 | |
459 | static int vangogh_init_smc_tables(struct smu_context *smu) |
460 | { |
461 | int ret = 0; |
462 | |
463 | ret = vangogh_tables_init(smu); |
464 | if (ret) |
465 | return ret; |
466 | |
467 | ret = vangogh_allocate_dpm_context(smu); |
468 | if (ret) |
469 | return ret; |
470 | |
471 | #ifdef CONFIG_X86 |
472 | /* AMD x86 APU only */ |
473 | #ifdef __linux__ |
474 | smu->cpu_core_num = boot_cpu_data.x86_max_cores; |
475 | #else |
476 | { |
477 | uint32_t eax, ebx, ecx, edx; |
478 | CPUID_LEAF(4, 0, eax, ebx, ecx, edx); |
479 | smu->cpu_core_num = ((eax >> 26) & 0x3f) + 1; |
480 | } |
481 | #endif |
482 | #else |
483 | smu->cpu_core_num = 4; |
484 | #endif |
485 | |
486 | return smu_v11_0_init_smc_tables(smu); |
487 | } |
488 | |
489 | static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable) |
490 | { |
491 | int ret = 0; |
492 | |
493 | if (enable) { |
494 | /* vcn dpm on is a prerequisite for vcn power gate messages */ |
495 | ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); |
496 | if (ret) |
497 | return ret; |
498 | } else { |
499 | ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); |
500 | if (ret) |
501 | return ret; |
502 | } |
503 | |
504 | return ret; |
505 | } |
506 | |
507 | static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) |
508 | { |
509 | int ret = 0; |
510 | |
511 | if (enable) { |
512 | ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); |
513 | if (ret) |
514 | return ret; |
515 | } else { |
516 | ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); |
517 | if (ret) |
518 | return ret; |
519 | } |
520 | |
521 | return ret; |
522 | } |
523 | |
524 | static bool vangogh_is_dpm_running(struct smu_context *smu) |
525 | { |
526 | struct amdgpu_device *adev = smu->adev; |
527 | int ret = 0; |
528 | uint64_t feature_enabled; |
529 | |
530 | /* we need to re-init after suspend so return false */ |
531 | if (adev->in_suspend) |
532 | return false; |
533 | |
534 | ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); |
535 | |
536 | if (ret) |
537 | return false; |
538 | |
539 | return !!(feature_enabled & SMC_DPM_FEATURE); |
540 | } |
541 | |
542 | static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, |
543 | uint32_t dpm_level, uint32_t *freq) |
544 | { |
545 | DpmClocks_t *clk_table = smu->smu_table.clocks_table; |
546 | |
547 | if (!clk_table || clk_type >= SMU_CLK_COUNT) |
548 | return -EINVAL; |
549 | |
550 | switch (clk_type) { |
551 | case SMU_SOCCLK: |
552 | if (dpm_level >= clk_table->NumSocClkLevelsEnabled) |
553 | return -EINVAL; |
554 | *freq = clk_table->SocClocks[dpm_level]; |
555 | break; |
556 | case SMU_VCLK: |
557 | if (dpm_level >= clk_table->VcnClkLevelsEnabled) |
558 | return -EINVAL; |
559 | *freq = clk_table->VcnClocks[dpm_level].vclk; |
560 | break; |
561 | case SMU_DCLK: |
562 | if (dpm_level >= clk_table->VcnClkLevelsEnabled) |
563 | return -EINVAL; |
564 | *freq = clk_table->VcnClocks[dpm_level].dclk; |
565 | break; |
566 | case SMU_UCLK: |
567 | case SMU_MCLK: |
568 | if (dpm_level >= clk_table->NumDfPstatesEnabled) |
569 | return -EINVAL; |
570 | *freq = clk_table->DfPstateTable[dpm_level].memclk; |
571 | |
572 | break; |
573 | case SMU_FCLK: |
574 | if (dpm_level >= clk_table->NumDfPstatesEnabled) |
575 | return -EINVAL; |
576 | *freq = clk_table->DfPstateTable[dpm_level].fclk; |
577 | break; |
578 | default: |
579 | return -EINVAL; |
580 | } |
581 | |
582 | return 0; |
583 | } |
584 | |
585 | static int vangogh_print_legacy_clk_levels(struct smu_context *smu, |
586 | enum smu_clk_type clk_type, char *buf) |
587 | { |
588 | DpmClocks_t *clk_table = smu->smu_table.clocks_table; |
589 | SmuMetrics_legacy_t metrics; |
590 | struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); |
591 | int i, idx, size = 0, ret = 0; |
592 | uint32_t cur_value = 0, value = 0, count = 0; |
593 | bool cur_value_match_level = false; |
594 | |
595 | memset(&metrics, 0, sizeof(metrics)); |
596 | |
597 | ret = smu_cmn_get_metrics_table(smu, &metrics, false); |
598 | if (ret) |
599 | return ret; |
600 | |
601 | smu_cmn_get_sysfs_buf(&buf, &size); |
602 | |
603 | switch (clk_type) { |
604 | case SMU_OD_SCLK: |
605 | if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { |
606 | size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); |
607 | size += sysfs_emit_at(buf, size, "0: %10uMhz\n", |
608 | (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); |
609 | size += sysfs_emit_at(buf, size, "1: %10uMhz\n", |
610 | (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); |
611 | } |
612 | break; |
613 | case SMU_OD_CCLK: |
614 | if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { |
615 | size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); |
616 | size += sysfs_emit_at(buf, size, "0: %10uMhz\n", |
617 | (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); |
618 | size += sysfs_emit_at(buf, size, "1: %10uMhz\n", |
619 | (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); |
620 | } |
621 | break; |
622 | case SMU_OD_RANGE: |
623 | if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { |
624 | size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); |
625 | size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", |
626 | smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); |
627 | size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", |
628 | smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); |
629 | } |
630 | break; |
631 | case SMU_SOCCLK: |
632 | /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ |
633 | count = clk_table->NumSocClkLevelsEnabled; |
634 | cur_value = metrics.SocclkFrequency; |
635 | break; |
636 | case SMU_VCLK: |
637 | count = clk_table->VcnClkLevelsEnabled; |
638 | cur_value = metrics.VclkFrequency; |
639 | break; |
640 | case SMU_DCLK: |
641 | count = clk_table->VcnClkLevelsEnabled; |
642 | cur_value = metrics.DclkFrequency; |
643 | break; |
644 | case SMU_MCLK: |
645 | count = clk_table->NumDfPstatesEnabled; |
646 | cur_value = metrics.MemclkFrequency; |
647 | break; |
648 | case SMU_FCLK: |
649 | count = clk_table->NumDfPstatesEnabled; |
650 | ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); |
651 | if (ret) |
652 | return ret; |
653 | break; |
654 | default: |
655 | break; |
656 | } |
657 | |
658 | switch (clk_type) { |
659 | case SMU_SOCCLK: |
660 | case SMU_VCLK: |
661 | case SMU_DCLK: |
662 | case SMU_MCLK: |
663 | case SMU_FCLK: |
664 | for (i = 0; i < count; i++) { |
665 | idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; |
666 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); |
667 | if (ret) |
668 | return ret; |
669 | if (!value) |
670 | continue; |
671 | size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, |
672 | cur_value == value ? "*" : ""); |
673 | if (cur_value == value) |
674 | cur_value_match_level = true; |
675 | } |
676 | |
677 | if (!cur_value_match_level) |
678 | size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); |
679 | break; |
680 | default: |
681 | break; |
682 | } |
683 | |
684 | return size; |
685 | } |
686 | |
687 | static int vangogh_print_clk_levels(struct smu_context *smu, |
688 | enum smu_clk_type clk_type, char *buf) |
689 | { |
690 | DpmClocks_t *clk_table = smu->smu_table.clocks_table; |
691 | SmuMetrics_t metrics; |
692 | struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); |
693 | int i, idx, size = 0, ret = 0; |
694 | uint32_t cur_value = 0, value = 0, count = 0; |
695 | bool cur_value_match_level = false; |
696 | uint32_t min, max; |
697 | |
698 | memset(&metrics, 0, sizeof(metrics)); |
699 | |
700 | ret = smu_cmn_get_metrics_table(smu, &metrics, false); |
701 | if (ret) |
702 | return ret; |
703 | |
704 | smu_cmn_get_sysfs_buf(&buf, &size); |
705 | |
706 | switch (clk_type) { |
707 | case SMU_OD_SCLK: |
708 | if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { |
709 | size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); |
710 | size += sysfs_emit_at(buf, size, "0: %10uMhz\n", |
711 | (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); |
712 | size += sysfs_emit_at(buf, size, "1: %10uMhz\n", |
713 | (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); |
714 | } |
715 | break; |
716 | case SMU_OD_CCLK: |
717 | if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { |
718 | size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); |
719 | size += sysfs_emit_at(buf, size, "0: %10uMhz\n", |
720 | (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); |
721 | size += sysfs_emit_at(buf, size, "1: %10uMhz\n", |
722 | (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); |
723 | } |
724 | break; |
725 | case SMU_OD_RANGE: |
726 | if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { |
727 | size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); |
728 | size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", |
729 | smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); |
730 | size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", |
731 | smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); |
732 | } |
733 | break; |
734 | case SMU_SOCCLK: |
735 | /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ |
736 | count = clk_table->NumSocClkLevelsEnabled; |
737 | cur_value = metrics.Current.SocclkFrequency; |
738 | break; |
739 | case SMU_VCLK: |
740 | count = clk_table->VcnClkLevelsEnabled; |
741 | cur_value = metrics.Current.VclkFrequency; |
742 | break; |
743 | case SMU_DCLK: |
744 | count = clk_table->VcnClkLevelsEnabled; |
745 | cur_value = metrics.Current.DclkFrequency; |
746 | break; |
747 | case SMU_MCLK: |
748 | count = clk_table->NumDfPstatesEnabled; |
749 | cur_value = metrics.Current.MemclkFrequency; |
750 | break; |
751 | case SMU_FCLK: |
752 | count = clk_table->NumDfPstatesEnabled; |
753 | ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); |
754 | if (ret) |
755 | return ret; |
756 | break; |
757 | case SMU_GFXCLK: |
758 | case SMU_SCLK: |
759 | ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value); |
760 | if (ret) { |
761 | return ret; |
762 | } |
763 | break; |
764 | default: |
765 | break; |
766 | } |
767 | |
768 | switch (clk_type) { |
769 | case SMU_SOCCLK: |
770 | case SMU_VCLK: |
771 | case SMU_DCLK: |
772 | case SMU_MCLK: |
773 | case SMU_FCLK: |
774 | for (i = 0; i < count; i++) { |
775 | idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; |
776 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); |
777 | if (ret) |
778 | return ret; |
779 | if (!value) |
780 | continue; |
781 | size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, |
782 | cur_value == value ? "*" : ""); |
783 | if (cur_value == value) |
784 | cur_value_match_level = true; |
785 | } |
786 | |
787 | if (!cur_value_match_level) |
788 | size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); |
789 | break; |
790 | case SMU_GFXCLK: |
791 | case SMU_SCLK: |
792 | min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; |
793 | max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; |
794 | if (cur_value == max) |
795 | i = 2; |
796 | else if (cur_value == min) |
797 | i = 0; |
798 | else |
799 | i = 1; |
800 | size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, |
801 | i == 0 ? "*" : ""); |
802 | size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", |
803 | i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, |
804 | i == 1 ? "*" : ""); |
805 | size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, |
806 | i == 2 ? "*" : ""); |
807 | break; |
808 | default: |
809 | break; |
810 | } |
811 | |
812 | return size; |
813 | } |
814 | |
815 | static int vangogh_common_print_clk_levels(struct smu_context *smu, |
816 | enum smu_clk_type clk_type, char *buf) |
817 | { |
818 | struct amdgpu_device *adev = smu->adev; |
Value stored to 'adev' during its initialization is never read | |
819 | uint32_t if_version; |
820 | int ret = 0; |
821 | |
822 | ret = smu_cmn_get_smc_version(smu, &if_version, NULL); |
823 | if (ret) { |
824 | dev_err(adev->dev, "Failed to get smu if version!\n"); |
825 | return ret; |
826 | } |
827 | |
828 | if (if_version < 0x3) |
829 | ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf); |
830 | else |
831 | ret = vangogh_print_clk_levels(smu, clk_type, buf); |
832 | |
833 | return ret; |
834 | } |
835 | |
836 | static int vangogh_get_profiling_clk_mask(struct smu_context *smu, |
837 | enum amd_dpm_forced_level level, |
838 | uint32_t *vclk_mask, |
839 | uint32_t *dclk_mask, |
840 | uint32_t *mclk_mask, |
841 | uint32_t *fclk_mask, |
842 | uint32_t *soc_mask) |
843 | { |
844 | DpmClocks_t *clk_table = smu->smu_table.clocks_table; |
845 | |
846 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { |
847 | if (mclk_mask) |
848 | *mclk_mask = clk_table->NumDfPstatesEnabled - 1; |
849 | |
850 | if (fclk_mask) |
851 | *fclk_mask = clk_table->NumDfPstatesEnabled - 1; |
852 | |
853 | if (soc_mask) |
854 | *soc_mask = 0; |
855 | } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { |
856 | if (mclk_mask) |
857 | *mclk_mask = 0; |
858 | |
859 | if (fclk_mask) |
860 | *fclk_mask = 0; |
861 | |
862 | if (soc_mask) |
863 | *soc_mask = 1; |
864 | |
865 | if (vclk_mask) |
866 | *vclk_mask = 1; |
867 | |
868 | if (dclk_mask) |
869 | *dclk_mask = 1; |
870 | } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) { |
871 | if (mclk_mask) |
872 | *mclk_mask = 0; |
873 | |
874 | if (fclk_mask) |
875 | *fclk_mask = 0; |
876 | |
877 | if (soc_mask) |
878 | *soc_mask = 1; |
879 | |
880 | if (vclk_mask) |
881 | *vclk_mask = 1; |
882 | |
883 | if (dclk_mask) |
884 | *dclk_mask = 1; |
885 | } |
886 | |
887 | return 0; |
888 | } |
889 | |
890 | static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu, |
891 | enum smu_clk_type clk_type) |
892 | { |
893 | enum smu_feature_mask feature_id = 0; |
894 | |
895 | switch (clk_type) { |
896 | case SMU_MCLK: |
897 | case SMU_UCLK: |
898 | case SMU_FCLK: |
899 | feature_id = SMU_FEATURE_DPM_FCLK_BIT; |
900 | break; |
901 | case SMU_GFXCLK: |
902 | case SMU_SCLK: |
903 | feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; |
904 | break; |
905 | case SMU_SOCCLK: |
906 | feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; |
907 | break; |
908 | case SMU_VCLK: |
909 | case SMU_DCLK: |
910 | feature_id = SMU_FEATURE_VCN_DPM_BIT; |
911 | break; |
912 | default: |
913 | return true; |
914 | } |
915 | |
916 | if (!smu_cmn_feature_is_enabled(smu, feature_id)) |
917 | return false; |
918 | |
919 | return true; |
920 | } |
921 | |
922 | static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu, |
923 | enum smu_clk_type clk_type, |
924 | uint32_t *min, |
925 | uint32_t *max) |
926 | { |
927 | int ret = 0; |
928 | uint32_t soc_mask; |
929 | uint32_t vclk_mask; |
930 | uint32_t dclk_mask; |
931 | uint32_t mclk_mask; |
932 | uint32_t fclk_mask; |
933 | uint32_t clock_limit; |
934 | |
935 | if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) { |
936 | switch (clk_type) { |
937 | case SMU_MCLK: |
938 | case SMU_UCLK: |
939 | clock_limit = smu->smu_table.boot_values.uclk; |
940 | break; |
941 | case SMU_FCLK: |
942 | clock_limit = smu->smu_table.boot_values.fclk; |
943 | break; |
944 | case SMU_GFXCLK: |
945 | case SMU_SCLK: |
946 | clock_limit = smu->smu_table.boot_values.gfxclk; |
947 | break; |
948 | case SMU_SOCCLK: |
949 | clock_limit = smu->smu_table.boot_values.socclk; |
950 | break; |
951 | case SMU_VCLK: |
952 | clock_limit = smu->smu_table.boot_values.vclk; |
953 | break; |
954 | case SMU_DCLK: |
955 | clock_limit = smu->smu_table.boot_values.dclk; |
956 | break; |
957 | default: |
958 | clock_limit = 0; |
959 | break; |
960 | } |
961 | |
962 | /* clock in Mhz unit */ |
963 | if (min) |
964 | *min = clock_limit / 100; |
965 | if (max) |
966 | *max = clock_limit / 100; |
967 | |
968 | return 0; |
969 | } |
970 | if (max) { |
971 | ret = vangogh_get_profiling_clk_mask(smu, |
972 | AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, |
973 | &vclk_mask, |
974 | &dclk_mask, |
975 | &mclk_mask, |
976 | &fclk_mask, |
977 | &soc_mask); |
978 | if (ret) |
979 | goto failed; |
980 | |
981 | switch (clk_type) { |
982 | case SMU_UCLK: |
983 | case SMU_MCLK: |
984 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); |
985 | if (ret) |
986 | goto failed; |
987 | break; |
988 | case SMU_SOCCLK: |
989 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max); |
990 | if (ret) |
991 | goto failed; |
992 | break; |
993 | case SMU_FCLK: |
994 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max); |
995 | if (ret) |
996 | goto failed; |
997 | break; |
998 | case SMU_VCLK: |
999 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max); |
1000 | if (ret) |
1001 | goto failed; |
1002 | break; |
1003 | case SMU_DCLK: |
1004 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max); |
1005 | if (ret) |
1006 | goto failed; |
1007 | break; |
1008 | default: |
1009 | ret = -EINVAL; |
1010 | goto failed; |
1011 | } |
1012 | } |
1013 | if (min) { |
1014 | switch (clk_type) { |
1015 | case SMU_UCLK: |
1016 | case SMU_MCLK: |
1017 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min); |
1018 | if (ret) |
1019 | goto failed; |
1020 | break; |
1021 | case SMU_SOCCLK: |
1022 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min); |
1023 | if (ret) |
1024 | goto failed; |
1025 | break; |
1026 | case SMU_FCLK: |
1027 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min); |
1028 | if (ret) |
1029 | goto failed; |
1030 | break; |
1031 | case SMU_VCLK: |
1032 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min); |
1033 | if (ret) |
1034 | goto failed; |
1035 | break; |
1036 | case SMU_DCLK: |
1037 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min); |
1038 | if (ret) |
1039 | goto failed; |
1040 | break; |
1041 | default: |
1042 | ret = -EINVAL; |
1043 | goto failed; |
1044 | } |
1045 | } |
1046 | failed: |
1047 | return ret; |
1048 | } |
1049 | |
1050 | static int vangogh_get_power_profile_mode(struct smu_context *smu, |
1051 | char *buf) |
1052 | { |
1053 | uint32_t i, size = 0; |
1054 | int16_t workload_type = 0; |
1055 | |
1056 | if (!buf) |
1057 | return -EINVAL; |
1058 | |
1059 | for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { |
1060 | /* |
1061 | * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT |
1062 | * Not all profile modes are supported on vangogh. |
1063 | */ |
1064 | workload_type = smu_cmn_to_asic_specific_index(smu, |
1065 | CMN2ASIC_MAPPING_WORKLOAD, |
1066 | i); |
1067 | |
1068 | if (workload_type < 0) |
1069 | continue; |
1070 | |
1071 | size += sysfs_emit_at(buf, size, "%2d %14s%s\n", |
1072 | i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); |
1073 | } |
1074 | |
1075 | return size; |
1076 | } |
1077 | |
1078 | static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) |
1079 | { |
1080 | int workload_type, ret; |
1081 | uint32_t profile_mode = input[size]; |
1082 | |
1083 | if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { |
1084 | dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); |
1085 | return -EINVAL; |
1086 | } |
1087 | |
1088 | if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || |
1089 | profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) |
1090 | return 0; |
1091 | |
1092 | /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ |
1093 | workload_type = smu_cmn_to_asic_specific_index(smu, |
1094 | CMN2ASIC_MAPPING_WORKLOAD, |
1095 | profile_mode); |
1096 | if (workload_type < 0) { |
1097 | dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", |
1098 | profile_mode); |
1099 | return -EINVAL; |
1100 | } |
1101 | |
1102 | ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, |
1103 | 1 << workload_type, |
1104 | NULL); |
1105 | if (ret) { |
1106 | dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", |
1107 | workload_type); |
1108 | return ret; |
1109 | } |
1110 | |
1111 | smu->power_profile_mode = profile_mode; |
1112 | |
1113 | return 0; |
1114 | } |
1115 | |
1116 | static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, |
1117 | enum smu_clk_type clk_type, |
1118 | uint32_t min, |
1119 | uint32_t max) |
1120 | { |
1121 | int ret = 0; |
1122 | |
1123 | if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) |
1124 | return 0; |
1125 | |
1126 | switch (clk_type) { |
1127 | case SMU_GFXCLK: |
1128 | case SMU_SCLK: |
1129 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1130 | SMU_MSG_SetHardMinGfxClk, |
1131 | min, NULL); |
1132 | if (ret) |
1133 | return ret; |
1134 | |
1135 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1136 | SMU_MSG_SetSoftMaxGfxClk, |
1137 | max, NULL); |
1138 | if (ret) |
1139 | return ret; |
1140 | break; |
1141 | case SMU_FCLK: |
1142 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1143 | SMU_MSG_SetHardMinFclkByFreq, |
1144 | min, NULL); |
1145 | if (ret) |
1146 | return ret; |
1147 | |
1148 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1149 | SMU_MSG_SetSoftMaxFclkByFreq, |
1150 | max, NULL); |
1151 | if (ret) |
1152 | return ret; |
1153 | break; |
1154 | case SMU_SOCCLK: |
1155 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1156 | SMU_MSG_SetHardMinSocclkByFreq, |
1157 | min, NULL); |
1158 | if (ret) |
1159 | return ret; |
1160 | |
1161 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1162 | SMU_MSG_SetSoftMaxSocclkByFreq, |
1163 | max, NULL); |
1164 | if (ret) |
1165 | return ret; |
1166 | break; |
1167 | case SMU_VCLK: |
1168 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1169 | SMU_MSG_SetHardMinVcn, |
1170 | min << 16, NULL); |
1171 | if (ret) |
1172 | return ret; |
1173 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1174 | SMU_MSG_SetSoftMaxVcn, |
1175 | max << 16, NULL); |
1176 | if (ret) |
1177 | return ret; |
1178 | break; |
1179 | case SMU_DCLK: |
1180 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1181 | SMU_MSG_SetHardMinVcn, |
1182 | min, NULL); |
1183 | if (ret) |
1184 | return ret; |
1185 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1186 | SMU_MSG_SetSoftMaxVcn, |
1187 | max, NULL); |
1188 | if (ret) |
1189 | return ret; |
1190 | break; |
1191 | default: |
1192 | return -EINVAL; |
1193 | } |
1194 | |
1195 | return ret; |
1196 | } |
1197 | |
1198 | static int vangogh_force_clk_levels(struct smu_context *smu, |
1199 | enum smu_clk_type clk_type, uint32_t mask) |
1200 | { |
1201 | uint32_t soft_min_level = 0, soft_max_level = 0; |
1202 | uint32_t min_freq = 0, max_freq = 0; |
1203 | int ret = 0; |
1204 | |
1205 | soft_min_level = mask ? (ffs(mask) - 1) : 0; |
1206 | soft_max_level = mask ? (fls(mask) - 1) : 0; |
1207 | |
1208 | switch (clk_type) { |
1209 | case SMU_SOCCLK: |
1210 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, |
1211 | soft_min_level, &min_freq); |
1212 | if (ret) |
1213 | return ret; |
1214 | ret = vangogh_get_dpm_clk_limited(smu, clk_type, |
1215 | soft_max_level, &max_freq); |
1216 | if (ret) |
1217 | return ret; |
1218 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1219 | SMU_MSG_SetSoftMaxSocclkByFreq, |
1220 | max_freq, NULL); |
1221 | if (ret) |
1222 | return ret; |
1223 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1224 | SMU_MSG_SetHardMinSocclkByFreq, |
1225 | min_freq, NULL); |
1226 | if (ret) |
1227 | return ret; |
1228 | break; |
1229 | case SMU_FCLK: |
1230 | ret = vangogh_get_dpm_clk_limited(smu, |
1231 | clk_type, soft_min_level, &min_freq); |
1232 | if (ret) |
1233 | return ret; |
1234 | ret = vangogh_get_dpm_clk_limited(smu, |
1235 | clk_type, soft_max_level, &max_freq); |
1236 | if (ret) |
1237 | return ret; |
1238 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1239 | SMU_MSG_SetSoftMaxFclkByFreq, |
1240 | max_freq, NULL); |
1241 | if (ret) |
1242 | return ret; |
1243 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1244 | SMU_MSG_SetHardMinFclkByFreq, |
1245 | min_freq, NULL); |
1246 | if (ret) |
1247 | return ret; |
1248 | break; |
1249 | case SMU_VCLK: |
1250 | ret = vangogh_get_dpm_clk_limited(smu, |
1251 | clk_type, soft_min_level, &min_freq); |
1252 | if (ret) |
1253 | return ret; |
1254 | |
1255 | ret = vangogh_get_dpm_clk_limited(smu, |
1256 | clk_type, soft_max_level, &max_freq); |
1257 | if (ret) |
1258 | return ret; |
1259 | |
1260 | |
1261 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1262 | SMU_MSG_SetHardMinVcn, |
1263 | min_freq << 16, NULL); |
1264 | if (ret) |
1265 | return ret; |
1266 | |
1267 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1268 | SMU_MSG_SetSoftMaxVcn, |
1269 | max_freq << 16, NULL); |
1270 | if (ret) |
1271 | return ret; |
1272 | |
1273 | break; |
1274 | case SMU_DCLK: |
1275 | ret = vangogh_get_dpm_clk_limited(smu, |
1276 | clk_type, soft_min_level, &min_freq); |
1277 | if (ret) |
1278 | return ret; |
1279 | |
1280 | ret = vangogh_get_dpm_clk_limited(smu, |
1281 | clk_type, soft_max_level, &max_freq); |
1282 | if (ret) |
1283 | return ret; |
1284 | |
1285 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1286 | SMU_MSG_SetHardMinVcn, |
1287 | min_freq, NULL); |
1288 | if (ret) |
1289 | return ret; |
1290 | |
1291 | ret = smu_cmn_send_smc_msg_with_param(smu, |
1292 | SMU_MSG_SetSoftMaxVcn, |
1293 | max_freq, NULL); |
1294 | if (ret) |
1295 | return ret; |
1296 | |
1297 | break; |
1298 | default: |
1299 | break; |
1300 | } |
1301 | |
1302 | return ret; |
1303 | } |
1304 | |
1305 | static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest) |
1306 | { |
1307 | int ret = 0, i = 0; |
1308 | uint32_t min_freq, max_freq, force_freq; |
1309 | enum smu_clk_type clk_type; |
1310 | |
1311 | enum smu_clk_type clks[] = { |
1312 | SMU_SOCCLK, |
1313 | SMU_VCLK, |
1314 | SMU_DCLK, |
1315 | SMU_FCLK, |
1316 | }; |
1317 | |
1318 | for (i = 0; i < ARRAY_SIZE(clks); i++) { |
1319 | clk_type = clks[i]; |
1320 | ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); |
1321 | if (ret) |
1322 | return ret; |
1323 | |
1324 | force_freq = highest ? max_freq : min_freq; |
1325 | ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq); |
1326 | if (ret) |
1327 | return ret; |
1328 | } |
1329 | |
1330 | return ret; |
1331 | } |
1332 | |
1333 | static int vangogh_unforce_dpm_levels(struct smu_context *smu) |
1334 | { |
1335 | int ret = 0, i = 0; |
1336 | uint32_t min_freq, max_freq; |
1337 | enum smu_clk_type clk_type; |
1338 | |
1339 | struct clk_feature_map { |
1340 | enum smu_clk_type clk_type; |
1341 | uint32_t feature; |
1342 | } clk_feature_map[] = { |
1343 | {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT}, |
1344 | {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, |
1345 | {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT}, |
1346 | {SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT}, |
1347 | }; |
1348 | |
1349 | for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { |
1350 | |
1351 | if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature)) |
1352 | continue; |
1353 | |
1354 | clk_type = clk_feature_map[i].clk_type; |
1355 | |
1356 | ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq); |
1357 | |
1358 | if (ret) |
1359 | return ret; |
1360 | |
1361 | ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); |
1362 | |
1363 | if (ret) |
1364 | return ret; |
1365 | } |
1366 | |
1367 | return ret; |
1368 | } |
1369 | |
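/*
 * Pin FCLK, SOCCLK, VCLK and DCLK to their device-specific peak values by
 * querying the highest supported frequency of each and setting min == max.
 * GFXCLK is handled separately by the PROFILE_PEAK path in
 * vangogh_set_performance_level() via VANGOGH_UMD_PSTATE_PEAK_GFXCLK.
 */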
static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
{
	int ret = 0;
	uint32_t socclk_freq = 0, fclk_freq = 0;
	uint32_t vclk_freq = 0, dclk_freq = 0;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
	if (ret)
		return ret;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
	if (ret)
		return ret;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
	if (ret)
		return ret;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
	if (ret)
		return ret;

	return ret;
}

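/*
 * Apply a forced performance level. GFXCLK bounds are tracked in the smu
 * context and committed at the end through SetHardMinGfxClk/SetSoftMaxGfxClk;
 * the other clocks go through the force/unforce helpers above. On firmware
 * 0x43f1b00 and newer the per-core CPU soft limits are programmed too; the
 * message parameter packs the core index into bits 20 and up and the
 * frequency in MHz into the low bits, e.g. core 2 at 1400 MHz:
 *
 *	param = (2 << 20) | 1400;
 *	smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk, param, NULL);
 */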
static int vangogh_set_performance_level(struct smu_context *smu,
					 enum amd_dpm_forced_level level)
{
	int ret = 0, i;
	uint32_t soc_mask, mclk_mask, fclk_mask;
	uint32_t vclk_mask = 0, dclk_mask = 0;

	smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
	smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_force_dpm_limit_value(smu, true);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;

		ret = vangogh_force_dpm_limit_value(smu, false);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_unforce_dpm_levels(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;

		ret = vangogh_get_profiling_clk_mask(smu, level,
						     &vclk_mask,
						     &dclk_mask,
						     &mclk_mask,
						     &fclk_mask,
						     &soc_mask);
		if (ret)
			return ret;

		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
		vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
		vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_get_profiling_clk_mask(smu, level,
						     NULL,
						     NULL,
						     &mclk_mask,
						     &fclk_mask,
						     NULL);
		if (ret)
			return ret;

		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;

		ret = vangogh_set_peak_clock_by_device(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
					      smu->gfx_actual_hard_min_freq, NULL);
	if (ret)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
					      smu->gfx_actual_soft_max_freq, NULL);
	if (ret)
		return ret;

	if (smu->adev->pm.fw_version >= 0x43f1b00) {
		for (i = 0; i < smu->cpu_core_num; i++) {
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
							      ((i << 20)
							       | smu->cpu_actual_soft_min_freq),
							      NULL);
			if (ret)
				return ret;

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
							      ((i << 20)
							       | smu->cpu_actual_soft_max_freq),
							      NULL);
			if (ret)
				return ret;
		}
	}

	return ret;
}

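/*
 * Generic sensor read backed by the SMU metrics table. Each case writes its
 * payload size to *size; CPU_CLK returns one 16-bit entry per core. The
 * SCLK/MCLK values are multiplied by 100 on the way out, consistent with the
 * metrics reporting MHz while the sensor interface works in 10 kHz units
 * (an assumption based on the surrounding SMU code, not spelled out here).
 */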
static int vangogh_read_sensor(struct smu_context *smu,
			       enum amd_pp_sensors sensor,
			       void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_GFXACTIVITY,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_SOCKETPOWER,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_TEMPERATURE_EDGE,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_TEMPERATURE_HOTSPOT,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_CURR_UCLK,
							  (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_CURR_GFXCLK,
							  (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_VOLTAGE_VDDGFX,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDNB:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_VOLTAGE_VDDSOC,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_CPU_CLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_CPUCLK,
							  (uint32_t *)data);
		*size = smu->cpu_core_num * sizeof(uint16_t);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

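/*
 * Copy the display driver's watermark ranges into the firmware Watermarks_t
 * layout (reader sets fill the WM_DCFCLK rows, writer sets the WM_SOCCLK
 * rows) and upload the table to the SMU once, tracked via the
 * WATERMARKS_EXIST/WATERMARKS_LOADED bits.
 */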
static int vangogh_set_watermarks_table(struct smu_context *smu,
					struct pp_smu_wm_range_sets *clock_ranges)
{
	int i;
	int ret = 0;
	Watermarks_t *table = smu->smu_table.watermarks_table;

	if (!table || !clock_ranges)
		return -EINVAL;

	if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
	    clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
		return -EINVAL;

	for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
		table->WatermarkRow[WM_DCFCLK][i].MinClock =
			clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MaxClock =
			clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MinMclk =
			clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
			clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;

		table->WatermarkRow[WM_DCFCLK][i].WmSetting =
			clock_ranges->reader_wm_sets[i].wm_inst;
	}

	for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
		table->WatermarkRow[WM_SOCCLK][i].MinClock =
			clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MaxClock =
			clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MinMclk =
			clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
			clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;

		table->WatermarkRow[WM_SOCCLK][i].WmSetting =
			clock_ranges->writer_wm_sets[i].wm_inst;
	}

	smu->watermarks_bitmap |= WATERMARKS_EXIST;

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_cmn_write_watermarks_table(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	return 0;
}

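/*
 * Four gpu_metrics producers follow. The "legacy" pair consumes the flat
 * SmuMetrics_legacy_t layout of older firmware interfaces (if_version < 3),
 * the other pair the Current/Average split of SmuMetrics_t; each comes in a
 * v2_2 and a v2_3 flavour of the exported table. The dispatch happens in
 * vangogh_common_get_gpu_metrics() below.
 */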
static ssize_t vangogh_get_legacy_gpu_metrics_v2_3(struct smu_context *smu,
						   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_3 *gpu_metrics =
		(struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
	       &metrics.CoreTemperature[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	gpu_metrics->average_gfx_power = metrics.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.CorePower[0],
	       sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.CoreFrequency[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_3);
}

static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
					      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
	       &metrics.CoreTemperature[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	gpu_metrics->average_gfx_power = metrics.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.CorePower[0],
	       sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.CoreFrequency[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}

static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu,
					    void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_3 *gpu_metrics =
		(struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);

	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
	       &metrics.Current.CoreTemperature[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
	gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
	memcpy(&gpu_metrics->average_temperature_core[0],
	       &metrics.Average.CoreTemperature[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.Average.CorePower[0],
	       sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.Current.CoreFrequency[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_3);
}

static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
				       void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
	       &metrics.Current.CoreTemperature[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.Average.CorePower[0],
	       sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.Current.CoreFrequency[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}

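/*
 * Select the gpu_metrics producer for the running firmware: PMFW 0x043F3E00
 * and newer get the v2_3 table, older firmware v2_2; within each, interface
 * versions below 3 still use the legacy SmuMetrics layout.
 */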
static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
					      void **table)
{
	uint32_t if_version;
	uint32_t smu_version;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	if (smu_version >= 0x043F3E00) {
		if (if_version < 0x3)
			ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
		else
			ret = vangogh_get_gpu_metrics_v2_3(smu, table);
	} else {
		if (if_version < 0x3)
			ret = vangogh_get_legacy_gpu_metrics(smu, table);
		else
			ret = vangogh_get_gpu_metrics(smu, table);
	}

	return ret;
}

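/*
 * Overdrive edits arriving from pp_od_clk_voltage. For the CCLK table the
 * numeric arguments are input[0] = core index, input[1] = 0 (min) or 1 (max)
 * and input[2] = frequency in MHz; for the SCLK table input[0] selects
 * min/max and input[1] is the frequency. Nothing is sent to the firmware
 * until a PP_OD_COMMIT_DPM_TABLE request comes in.
 */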
static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
				     long input[], uint32_t size)
{
	int ret = 0;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
		dev_warn(smu->adev->dev,
			 "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
		return -EINVAL;
	}

	switch (type) {
	case PP_OD_EDIT_CCLK_VDDC_TABLE:
		if (size != 3) {
			dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n");
			return -EINVAL;
		}
		if (input[0] >= smu->cpu_core_num) {
			dev_err(smu->adev->dev, "core index is out of range, should be less than %d\n",
				smu->cpu_core_num);
			return -EINVAL;
		}
		smu->cpu_core_id_select = input[0];
		if (input[1] == 0) {
			if (input[2] < smu->cpu_default_soft_min_freq) {
				dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
					 input[2], smu->cpu_default_soft_min_freq);
				return -EINVAL;
			}
			smu->cpu_actual_soft_min_freq = input[2];
		} else if (input[1] == 1) {
			if (input[2] > smu->cpu_default_soft_max_freq) {
				dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
					 input[2], smu->cpu_default_soft_max_freq);
				return -EINVAL;
			}
			smu->cpu_actual_soft_max_freq = input[2];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			if (input[1] < smu->gfx_default_hard_min_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_hard_min_freq);
				return -EINVAL;
			}
			smu->gfx_actual_hard_min_freq = input[1];
		} else if (input[0] == 1) {
			if (input[1] > smu->gfx_default_soft_max_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_soft_max_freq);
				return -EINVAL;
			}
			smu->gfx_actual_soft_max_freq = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
			smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
			smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
			smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
		}
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
				dev_err(smu->adev->dev,
					"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
					smu->gfx_actual_hard_min_freq,
					smu->gfx_actual_soft_max_freq);
				return -EINVAL;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
							      smu->gfx_actual_hard_min_freq, NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set hard min sclk failed!\n");
				return ret;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
							      smu->gfx_actual_soft_max_freq, NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set soft max sclk failed!\n");
				return ret;
			}

			if (smu->adev->pm.fw_version < 0x43f1b00) {
				dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
				break;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
							      ((smu->cpu_core_id_select << 20)
							       | smu->cpu_actual_soft_min_freq),
							      NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set soft min cclk failed!\n");
				return ret;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
							      ((smu->cpu_core_id_select << 20)
							       | smu->cpu_actual_soft_max_freq),
							      NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set soft max cclk failed!\n");
				return ret;
			}
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

static int vangogh_set_default_dpm_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}

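/*
 * Seed the fine-grain tunables: the GFXCLK bounds come from the firmware
 * clock table, while the CPU soft limits use fixed 1400/3500 MHz defaults.
 */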
static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
	smu->gfx_actual_hard_min_freq = 0;
	smu->gfx_actual_soft_max_freq = 0;

	smu->cpu_default_soft_min_freq = 1400;
	smu->cpu_default_soft_max_freq = 3500;
	smu->cpu_actual_soft_min_freq = 0;
	smu->cpu_actual_soft_max_freq = 0;

	return 0;
}

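/*
 * Export the firmware DPM clock levels in the generic dpm_clocks layout.
 * FCLK and MEMCLK levels both come from the DF pstate table and share its
 * per-pstate voltages.
 */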
static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
	DpmClocks_t *table = smu->smu_table.clocks_table;
	int i;

	if (!clock_table || !table)
		return -EINVAL;

	for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
		clock_table->SocClocks[i].Freq = table->SocClocks[i];
		clock_table->SocClocks[i].Vol = table->SocVoltage[i];
	}

	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
		clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
		clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
	}

	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
		clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
		clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
	}

	return 0;
}

static int vangogh_system_features_control(struct smu_context *smu, bool en)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->pm.fw_version >= 0x43f1700 && !en)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
						      RLC_STATUS_OFF, NULL);

	return ret;
}

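/*
 * Late init once the SMU is up: enable GFXOFF where GFX DPM and power gating
 * allow it, then compute how many WGPs (two CUs each) need to stay active
 * and ask the firmware to power-gate the rest, honouring the always-on WGP
 * mask in RLC_PG_ALWAYS_ON_WGP_MASK.
 */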
static int vangogh_post_smu_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t tmp;
	int ret = 0;
	uint8_t aon_bits = 0;
	/* Two CUs in one WGP */
	uint32_t req_active_wgps = adev->gfx.cu_info.number/2;
	uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
		adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* allow message will be sent after enable message on Vangogh */
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
	    (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
		if (ret) {
			dev_err(adev->dev, "Failed to Enable GfxOff!\n");
			return ret;
		}
	} else {
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n");
	}

	/* if all CUs are active, no need to power off any WGPs */
	if (total_cu == adev->gfx.cu_info.number)
		return 0;

	/*
	 * Calculate the total bits number of always on WGPs for all SA/SEs in
	 * RLC_PG_ALWAYS_ON_WGP_MASK.
	 */
	tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
	tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;

	aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* Do not request any WGPs less than set in the AON_WGP_MASK */
	if (aon_bits > req_active_wgps) {
		dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
		return 0;
	} else {
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
	}
}

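/*
 * Ask the PMFW for a GFX device driver reset of the given type (mode 2 being
 * a soft, driver-initiated reset; the exact scope is firmware-defined). The
 * message is fired without waiting, under the message lock, and the 10 ms
 * delay gives the firmware time to act.
 */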
static int vangogh_mode_reset(struct smu_context *smu, int type)
{
	int ret = 0, index = 0;

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GfxDeviceDriverReset);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);

	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);

	mutex_unlock(&smu->message_lock);

	mdelay(10);

	return ret;
}

static int vangogh_mode2_reset(struct smu_context *smu)
{
	return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}

/**
 * vangogh_get_gfxoff_status - Get gfxoff status
 *
 * @smu: pointer to the SMU context
 *
 * Get current gfxoff status
 *
 * Return:
 * * 0 - GFXOFF (default if enabled).
 * * 1 - Transition out of GFX State.
 * * 2 - Not in GFXOFF.
 * * 3 - Transition into GFXOFF.
 */
static u32 vangogh_get_gfxoff_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	u32 reg, gfxoff_status;

	reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
	gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
		>> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;

	return gfxoff_status;
}

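/*
 * Vangogh exposes two package power limits: a slow and a fast PPT, commonly
 * described as the sustained and short-burst budgets. The firmware reports
 * and accepts both in milliwatts, so values are divided or multiplied by
 * 1000 at this boundary; the maximums (29 W slow, 30 W fast) are fixed in
 * the driver.
 */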
static int vangogh_get_power_limit(struct smu_context *smu,
				   uint32_t *current_power_limit,
				   uint32_t *default_power_limit,
				   uint32_t *max_power_limit)
{
	struct smu_11_5_power_context *power_context =
		smu->smu_power.power_context;
	uint32_t ppt_limit;
	int ret = 0;

	if (smu->adev->pm.fw_version < 0x43f1e00)
		return ret;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
		return ret;
	}
	/* convert from milliwatt to watt */
	if (current_power_limit)
		*current_power_limit = ppt_limit / 1000;
	if (default_power_limit)
		*default_power_limit = ppt_limit / 1000;
	if (max_power_limit)
		*max_power_limit = 29;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
		return ret;
	}
	/* convert from milliwatt to watt */
	power_context->current_fast_ppt_limit =
		power_context->default_fast_ppt_limit = ppt_limit / 1000;
	power_context->max_fast_ppt_limit = 30;

	return ret;
}

static int vangogh_get_ppt_limit(struct smu_context *smu,
				 uint32_t *ppt_limit,
				 enum smu_ppt_limit_type type,
				 enum smu_ppt_limit_level level)
{
	struct smu_11_5_power_context *power_context =
		smu->smu_power.power_context;

	if (!power_context)
		return -EOPNOTSUPP;

	if (type == SMU_FAST_PPT_LIMIT) {
		switch (level) {
		case SMU_PPT_LIMIT_MAX:
			*ppt_limit = power_context->max_fast_ppt_limit;
			break;
		case SMU_PPT_LIMIT_CURRENT:
			*ppt_limit = power_context->current_fast_ppt_limit;
			break;
		case SMU_PPT_LIMIT_DEFAULT:
			*ppt_limit = power_context->default_fast_ppt_limit;
			break;
		default:
			break;
		}
	}

	return 0;
}

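/*
 * Write a new PPT limit in watts. For the fast PPT the limit-type tag is
 * carried in the upper bits of the value and masked off before range
 * checking; both firmware messages take the limit in milliwatts.
 */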
static int vangogh_set_power_limit(struct smu_context *smu,
				   enum smu_ppt_limit_type limit_type,
				   uint32_t ppt_limit)
{
	struct smu_11_5_power_context *power_context =
		smu->smu_power.power_context;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	switch (limit_type) {
	case SMU_DEFAULT_PPT_LIMIT:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSlowPPTLimit,
						      ppt_limit * 1000, /* convert from watt to milliwatt */
						      NULL);
		if (ret)
			return ret;

		smu->current_power_limit = ppt_limit;
		break;
	case SMU_FAST_PPT_LIMIT:
		ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
		if (ppt_limit > power_context->max_fast_ppt_limit) {
			dev_err(smu->adev->dev,
				"New power limit (%d) is over the max allowed %d\n",
				ppt_limit, power_context->max_fast_ppt_limit);
			return -EINVAL;
		}

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetFastPPTLimit,
						      ppt_limit * 1000, /* convert from watt to milliwatt */
						      NULL);
		if (ret)
			return ret;

		power_context->current_fast_ppt_limit = ppt_limit;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

/**
 * vangogh_set_gfxoff_residency - start/stop logging of gfxoff residency
 *
 * @smu: pointer to the SMU context
 * @start: start/stop residency log
 *
 * This function will be used to log gfxoff residency
 *
 * Returns standard response codes.
 */
static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
{
	int ret = 0;
	u32 residency;
	struct amdgpu_device *adev = smu->adev;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return 0;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
					      start, &residency);

	if (!start)
		adev->gfx.gfx_off_residency = residency;

	return ret;
}

/**
 * vangogh_get_gfxoff_residency - get the last logged gfxoff residency
 *
 * @smu: pointer to the SMU context
 * @residency: placeholder for the residency value
 *
 * This function will be used to get gfxoff residency.
 *
 * Returns standard response codes.
 */
static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency)
{
	struct amdgpu_device *adev = smu->adev;

	*residency = adev->gfx.gfx_off_residency;

	return 0;
}

/**
 * vangogh_get_gfxoff_entrycount - get gfxoff entry count
 *
 * @smu: pointer to the SMU context
 * @entrycount: placeholder for the number of gfxoff entries
 *
 * This function will be used to get gfxoff entry count
 *
 * Returns standard response codes.
 */
static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount)
{
	int ret = 0, value = 0;
	struct amdgpu_device *adev = smu->adev;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value);
	*entrycount = value + adev->gfx.gfx_off_entrycount;

	return ret;
}

static const struct pptable_funcs vangogh_ppt_funcs = {
	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.init_smc_tables = vangogh_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable,
	.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
	.is_dpm_running = vangogh_is_dpm_running,
	.read_sensor = vangogh_read_sensor,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_watermarks_table = vangogh_set_watermarks_table,
	.set_driver_table_location = smu_v11_0_set_driver_table_location,
	.interrupt_work = smu_v11_0_interrupt_work,
	.get_gpu_metrics = vangogh_common_get_gpu_metrics,
	.od_edit_dpm_table = vangogh_od_edit_dpm_table,
	.print_clk_levels = vangogh_common_print_clk_levels,
	.set_default_dpm_table = vangogh_set_default_dpm_tables,
	.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
	.system_features_control = vangogh_system_features_control,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.set_power_profile_mode = vangogh_set_power_profile_mode,
	.get_power_profile_mode = vangogh_get_power_profile_mode,
	.get_dpm_clock_table = vangogh_get_dpm_clock_table,
	.force_clk_levels = vangogh_force_clk_levels,
	.set_performance_level = vangogh_set_performance_level,
	.post_init = vangogh_post_smu_init,
	.mode2_reset = vangogh_mode2_reset,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.get_gfx_off_status = vangogh_get_gfxoff_status,
	.get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount,
	.get_gfx_off_residency = vangogh_get_gfxoff_residency,
	.set_gfx_off_residency = vangogh_set_gfxoff_residency,
	.get_ppt_limit = vangogh_get_ppt_limit,
	.get_power_limit = vangogh_get_power_limit,
	.set_power_limit = vangogh_set_power_limit,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
};

void vangogh_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &vangogh_ppt_funcs;
	smu->message_map = vangogh_message_map;
	smu->feature_map = vangogh_feature_mask_map;
	smu->table_map = vangogh_table_map;
	smu->workload_map = vangogh_workload_map;
	smu->is_apu = true;
	smu_v11_0_set_smu_mailbox_registers(smu);
}