| File: | dev/pci/drm/amd/pm/swsmu/smu_cmn.c |
| Warning: | line 516, column 24: Value stored to 'adev' during its initialization is never read |
| 1 | /* |
| 2 | * Copyright 2020 Advanced Micro Devices, Inc. |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | */ |
| 22 | |
| 23 | #define SWSMU_CODE_LAYER_L4 |
| 24 | |
| 25 | #include "amdgpu.h" |
| 26 | #include "amdgpu_smu.h" |
| 27 | #include "smu_cmn.h" |
| 28 | #include "soc15_common.h" |
| 29 | |
| 30 | /* |
| 31 | * DO NOT use these for err/warn/info/debug messages. |
| 32 | * Use dev_err, dev_warn, dev_info and dev_dbg instead. |
| 33 | * They are more MGPU friendly. |
| 34 | */ |
| 35 | #undef pr_err |
| 36 | #undef pr_warn |
| 37 | #undef pr_info |
| 38 | #undef pr_debug |
| 39 | |
| 40 | #define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL |
| 41 | |
| 42 | #undef __SMU_DUMMY_MAP |
| 43 | #define __SMU_DUMMY_MAP(type)"type" #type |
| 44 | static const char * const __smu_message_names[] = { |
| 45 | SMU_MESSAGE_TYPES"TestMessage", "GetSmuVersion", "GetDriverIfVersion", "SetAllowedFeaturesMaskLow" , "SetAllowedFeaturesMaskHigh", "EnableAllSmuFeatures", "DisableAllSmuFeatures" , "EnableSmuFeaturesLow", "EnableSmuFeaturesHigh", "DisableSmuFeaturesLow" , "DisableSmuFeaturesHigh", "GetEnabledSmuFeatures", "GetEnabledSmuFeaturesLow" , "GetEnabledSmuFeaturesHigh", "SetWorkloadMask", "SetPptLimit" , "SetDriverDramAddrHigh", "SetDriverDramAddrLow", "SetToolsDramAddrHigh" , "SetToolsDramAddrLow", "TransferTableSmu2Dram", "TransferTableDram2Smu" , "UseDefaultPPTable", "UseBackupPPTable", "RunBtc", "RequestI2CBus" , "ReleaseI2CBus", "SetFloorSocVoltage", "SoftReset", "StartBacoMonitor" , "CancelBacoMonitor", "EnterBaco", "SetSoftMinByFreq", "SetSoftMaxByFreq" , "SetHardMinByFreq", "SetHardMaxByFreq", "GetMinDpmFreq", "GetMaxDpmFreq" , "GetDpmFreqByIndex", "GetDpmClockFreq", "GetSsVoltageByDpm" , "SetMemoryChannelConfig", "SetGeminiMode", "SetGeminiApertureHigh" , "SetGeminiApertureLow", "SetMinLinkDpmByIndex", "OverridePcieParameters" , "OverDriveSetPercentage", "SetMinDeepSleepDcefclk", "ReenableAcDcInterrupt" , "AllowIHHostInterrupt", "NotifyPowerSource", "SetUclkFastSwitch" , "SetUclkDownHyst", "GfxDeviceDriverReset", "GetCurrentRpm", "SetVideoFps", "SetTjMax", "SetFanTemperatureTarget", "PrepareMp1ForUnload" , "DramLogSetDramAddrHigh", "DramLogSetDramAddrLow", "DramLogSetDramSize" , "SetFanMaxRpm", "SetFanMinPwm", "ConfigureGfxDidt", "NumOfDisplays" , "RemoveMargins", "ReadSerialNumTop32", "ReadSerialNumBottom32" , "SetSystemVirtualDramAddrHigh", "SetSystemVirtualDramAddrLow" , "WaflTest", "SetFclkGfxClkRatio", "AllowGfxOff", "DisallowGfxOff" , "GetPptLimit", "GetDcModeMaxDpmFreq", "GetDebugData", "SetXgmiMode" , "RunAfllBtc", "ExitBaco", "PrepareMp1ForReset", "PrepareMp1ForShutdown" , "SetMGpuFanBoostLimitRpm", "GetAVFSVoltageByDpm", "PowerUpVcn" , "PowerDownVcn", "PowerUpJpeg", "PowerDownJpeg", "BacoAudioD3PME" , "ArmD3", "RunDcBtc", "RunGfxDcBtc", "RunSocDcBtc", "SetMemoryChannelEnable" , "SetDfSwitchType", "GetVoltageByDpm", "GetVoltageByDpmOverdrive" , "PowerUpVcn0", "PowerDownVcn0", "PowerUpVcn1", "PowerDownVcn1" , "PowerUpGfx", "PowerDownIspByTile", "PowerUpIspByTile", "PowerDownSdma" , "PowerUpSdma", "SetHardMinIspclkByFreq", "SetHardMinVcn", "SetAllowFclkSwitch" , "SetMinVideoGfxclkFreq", "ActiveProcessNotify", "SetCustomPolicy" , "QueryPowerLimit", "SetGfxclkOverdriveByFreqVid", "SetHardMinDcfclkByFreq" , "SetHardMinSocclkByFreq", "ControlIgpuATS", "SetMinVideoFclkFreq" , "SetMinDeepSleepDcfclk", "ForcePowerDownGfx", "SetPhyclkVoltageByFreq" , "SetDppclkVoltageByFreq", "SetSoftMinVcn", "EnablePostCode" , "GetGfxclkFrequency", "GetFclkFrequency", "GetMinGfxclkFrequency" , "GetMaxGfxclkFrequency", "SetGfxCGPG", "SetSoftMaxGfxClk", "SetHardMinGfxClk" , "SetSoftMaxSocclkByFreq", "SetSoftMaxFclkByFreq", "SetSoftMaxVcn" , "PowerGateMmHub", "UpdatePmeRestore", "GpuChangeState", "SetPowerLimitPercentage" , "ForceGfxContentSave", "EnableTmdp48MHzRefclkPwrDown", "PowerGateAtHub" , "SetSoftMinJpeg", "SetHardMinFclkByFreq", "DFCstateControl" , "GmiPwrDnControl", "spare", "SetNumBadHbmPagesRetired", "GetGmiPwrDnHyst" , "SetGmiPwrDnHyst", "EnterGfxoff", "ExitGfxoff", "SetExecuteDMATest" , "DAL_DISABLE_DUMMY_PSTATE_CHANGE", "DAL_ENABLE_DUMMY_PSTATE_CHANGE" , "SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH", "SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW" , "GET_UMC_FW_WA", "Mode1Reset", "RlcPowerNotify", "SetHardMinIspiclkByFreq" , "SetHardMinIspxclkByFreq", "SetSoftMinSocclkByFreq", "PowerUpCvip" , 
"PowerDownCvip", "EnableGfxOff", "DisableGfxOff", "SetSoftMinGfxclk" , "SetSoftMinFclk", "GetThermalLimit", "GetCurrentTemperature" , "GetCurrentPower", "GetCurrentVoltage", "GetCurrentCurrent" , "GetAverageCpuActivity", "GetAverageGfxActivity", "GetAveragePower" , "GetAverageTemperature", "SetAveragePowerTimeConstant", "SetAverageActivityTimeConstant" , "SetAverageTemperatureTimeConstant", "SetMitigationEndHysteresis" , "GetCurrentFreq", "SetReducedPptLimit", "SetReducedThermalLimit" , "DramLogSetDramAddr", "StartDramLogging", "StopDramLogging" , "SetSoftMinCclk", "SetSoftMaxCclk", "SetGpoFeaturePMask", "DisallowGpo" , "Enable2ndUSB20Port", "RequestActiveWgp", "SetFastPPTLimit" , "SetSlowPPTLimit", "GetFastPPTLimit", "GetSlowPPTLimit", "EnableDeterminism" , "DisableDeterminism", "SetUclkDpmMode", "LightSBR", "GfxDriverResetRecovery" , "BoardPowerCalibration", "RequestGfxclk", "ForceGfxVid", "Spare0" , "UnforceGfxVid", "HeavySBR", "SetBadHBMPagesRetiredFlagsPerChannel" , "EnableGfxImu", "DriverMode2Reset", "GetGfxOffStatus", "GetGfxOffEntryCount" , "LogGfxOffResidency", "SetNumBadMemoryPagesRetired", "SetBadMemoryPagesRetiredFlagsPerChannel" , "AllowGpo", |
| 46 | }; |
| 47 | |
| 48 | #define smu_cmn_call_asic_func(intf, smu, args...) \ |
| 49 | ((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \ |
| 50 | (smu)->ppt_funcs->intf(smu, ##args) : \ |
| 51 | -ENOTSUPP) : \ |
| 52 | -EINVAL) |
| 53 | |
| 54 | static const char *smu_get_message_name(struct smu_context *smu, |
| 55 | enum smu_message_type type) |
| 56 | { |
| 57 | if (type < 0 || type >= SMU_MSG_MAX_COUNT) |
| 58 | return "unknown smu message"; |
| 59 | |
| 60 | return __smu_message_names[type]; |
| 61 | } |
| 62 | |
| 63 | static void smu_cmn_read_arg(struct smu_context *smu, |
| 64 | uint32_t *arg) |
| 65 | { |
| 66 | struct amdgpu_device *adev = smu->adev; |
| 67 | |
| 68 | *arg = RREG32(smu->param_reg); |
| 69 | } |
| 70 | |
| 71 | /* Redefine the SMU error codes here. |
| 72 | * |
| 73 | * Note that these definitions are redundant and should be removed |
| 74 | * when the SMU has exported a unified header file containing these |
| 75 | * macros, which header file we can just include and use the SMU's |
| 76 | * macros. At the moment, these error codes are defined by the SMU |
| 77 | * per-ASIC unfortunately, yet we're a one driver for all ASICs. |
| 78 | */ |
| 79 | #define SMU_RESP_NONE           0 |
| 80 | #define SMU_RESP_OK             1 |
| 81 | #define SMU_RESP_CMD_FAIL       0xFF |
| 82 | #define SMU_RESP_CMD_UNKNOWN    0xFE |
| 83 | #define SMU_RESP_CMD_BAD_PREREQ 0xFD |
| 84 | #define SMU_RESP_BUSY_OTHER     0xFC |
| 85 | #define SMU_RESP_DEBUG_END      0xFB |
| 86 | |
| 87 | /** |
| 88 | * __smu_cmn_poll_stat -- poll for a status from the SMU |
| 89 | * @smu: a pointer to SMU context |
| 90 | * |
| 91 | * Returns the status of the SMU, which could be, |
| 92 | * 0, the SMU is busy with your command; |
| 93 | * 1, execution status: success, execution result: success; |
| 94 | * 0xFF, execution status: success, execution result: failure; |
| 95 | * 0xFE, unknown command; |
| 96 | * 0xFD, valid command, but bad (command) prerequisites; |
| 97 | * 0xFC, the command was rejected as the SMU is busy; |
| 98 | * 0xFB, "SMC_Result_DebugDataDumpEnd". |
| 99 | * |
| 100 | * The values here are not defined by macros, because I'd rather we |
| 101 | * include a single header file which defines them, which is |
| 102 | * maintained by the SMU FW team, so that we're impervious to firmware |
| 103 | * changes. At the moment those values are defined in various header |
| 104 | * files, one for each ASIC, yet here we're a single ASIC-agnostic |
| 105 | * interface. Such a change can be followed-up by a subsequent patch. |
| 106 | */ |
| 107 | static u32 __smu_cmn_poll_stat(struct smu_context *smu) |
| 108 | { |
| 109 | struct amdgpu_device *adev = smu->adev; |
| 110 | int timeout = adev->usec_timeout * 20; |
| 111 | u32 reg; |
| 112 | |
| 113 | for ( ; timeout > 0; timeout--) { |
| 114 | reg = RREG32(smu->resp_reg); |
| 115 | if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0) |
| 116 | break; |
| 117 | |
| 118 | udelay(1); |
| 119 | } |
| 120 | |
| 121 | return reg; |
| 122 | } |
| 123 | |
| 124 | static void __smu_cmn_reg_print_error(struct smu_context *smu, |
| 125 | u32 reg_c2pmsg_90, |
| 126 | int msg_index, |
| 127 | u32 param, |
| 128 | enum smu_message_type msg) |
| 129 | { |
| 130 | struct amdgpu_device *adev = smu->adev; |
| 131 | const char *message = smu_get_message_name(smu, msg); |
| 132 | u32 msg_idx, prm; |
| 133 | |
| 134 | switch (reg_c2pmsg_90) { |
| 135 | case SMU_RESP_NONE: { |
| 136 | msg_idx = RREG32(smu->msg_reg); |
| 137 | prm = RREG32(smu->param_reg); |
| 138 | dev_err_ratelimited(adev->dev, |
| 139 | "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X", |
| 140 | msg_idx, prm); |
| 141 | } |
| 142 | break; |
| 143 | case SMU_RESP_OK: |
| 144 | /* The SMU executed the command. It completed with a |
| 145 | * successful result. |
| 146 | */ |
| 147 | break; |
| 148 | case SMU_RESP_CMD_FAIL: |
| 149 | /* The SMU executed the command. It completed with an |
| 150 | * unsuccessful result. |
| 151 | */ |
| 152 | break; |
| 153 | case SMU_RESP_CMD_UNKNOWN: |
| 154 | dev_err_ratelimited(adev->dev, |
| 155 | "SMU: unknown command: index:%d param:0x%08X message:%s", |
| 156 | msg_index, param, message); |
| 157 | break; |
| 158 | case SMU_RESP_CMD_BAD_PREREQ: |
| 159 | dev_err_ratelimited(adev->dev, |
| 160 | "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s", |
| 161 | msg_index, param, message); |
| 162 | break; |
| 163 | case SMU_RESP_BUSY_OTHER: |
| 164 | dev_err_ratelimited(adev->dev, |
| 165 | "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s", |
| 166 | msg_index, param, message); |
| 167 | break; |
| 168 | case SMU_RESP_DEBUG_END: |
| 169 | dev_err_ratelimited(adev->dev, |
| 170 | "SMU: I'm debugging!"); |
| 171 | break; |
| 172 | default: |
| 173 | dev_err_ratelimited(adev->dev, |
| 174 | "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", |
| 175 | reg_c2pmsg_90, msg_index, param, message); |
| 176 | break; |
| 177 | } |
| 178 | } |
| 179 | |
| 180 | static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90) |
| 181 | { |
| 182 | int res; |
| 183 | |
| 184 | switch (reg_c2pmsg_90) { |
| 185 | case SMU_RESP_NONE: |
| 186 | /* The SMU is busy--still executing your command. |
| 187 | */ |
| 188 | res = -ETIME; |
| 189 | break; |
| 190 | case SMU_RESP_OK: |
| 191 | res = 0; |
| 192 | break; |
| 193 | case SMU_RESP_CMD_FAIL: |
| 194 | /* Command completed successfully, but the command |
| 195 | * status was failure. |
| 196 | */ |
| 197 | res = -EIO; |
| 198 | break; |
| 199 | case SMU_RESP_CMD_UNKNOWN: |
| 200 | /* Unknown command--ignored by the SMU. |
| 201 | */ |
| 202 | res = -EOPNOTSUPP; |
| 203 | break; |
| 204 | case SMU_RESP_CMD_BAD_PREREQ: |
| 205 | /* Valid command--bad prerequisites. |
| 206 | */ |
| 207 | res = -EINVAL; |
| 208 | break; |
| 209 | case SMU_RESP_BUSY_OTHER: |
| 210 | /* The SMU is busy with other commands. The client |
| 211 | * should retry in 10 us. |
| 212 | */ |
| 213 | res = -EBUSY; |
| 214 | break; |
| 215 | default: |
| 216 | /* Unknown or debug response from the SMU. |
| 217 | */ |
| 218 | res = -EREMOTEIO; |
| 219 | break; |
| 220 | } |
| 221 | |
| 222 | return res; |
| 223 | } |
| 224 | |
| 225 | static void __smu_cmn_send_msg(struct smu_context *smu, |
| 226 | u16 msg, |
| 227 | u32 param) |
| 228 | { |
| 229 | struct amdgpu_device *adev = smu->adev; |
| 230 | |
| 231 | WREG32(smu->resp_reg, 0); |
| 232 | WREG32(smu->param_reg, param); |
| 233 | WREG32(smu->msg_reg, msg); |
| 234 | } |
| 235 | |
| 236 | static int __smu_cmn_send_debug_msg(struct smu_context *smu, |
| 237 | u32 msg, |
| 238 | u32 param) |
| 239 | { |
| 240 | struct amdgpu_device *adev = smu->adev; |
| 241 | |
| 242 | WREG32(smu->debug_param_reg, param); |
| 243 | WREG32(smu->debug_msg_reg, msg); |
| 244 | WREG32(smu->debug_resp_reg, 0); |
| 245 | |
| 246 | return 0; |
| 247 | } |
| 248 | /** |
| 249 | * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status |
| 250 | * @smu: pointer to an SMU context |
| 251 | * @msg_index: message index |
| 252 | * @param: message parameter to send to the SMU |
| 253 | * |
| 254 | * Send a message to the SMU with the parameter passed. Do not wait |
| 255 | * for status/result of the message, thus the "without_waiting". |
| 256 | * |
| 257 | * Return 0 on success, -errno on error if we weren't able to _send_ |
| 258 | * the message for some reason. See __smu_cmn_reg2errno() for details |
| 259 | * of the -errno. |
| 260 | */ |
| 261 | int smu_cmn_send_msg_without_waiting(struct smu_context *smu, |
| 262 | uint16_t msg_index, |
| 263 | uint32_t param) |
| 264 | { |
| 265 | struct amdgpu_device *adev = smu->adev; |
| 266 | u32 reg; |
| 267 | int res; |
| 268 | |
| 269 | if (adev->no_hw_access) |
| 270 | return 0; |
| 271 | |
| 272 | reg = __smu_cmn_poll_stat(smu); |
| 273 | res = __smu_cmn_reg2errno(smu, reg); |
| 274 | if (reg == SMU_RESP_NONE || |
| 275 | res == -EREMOTEIO) |
| 276 | goto Out; |
| 277 | __smu_cmn_send_msg(smu, msg_index, param); |
| 278 | res = 0; |
| 279 | Out: |
| 280 | if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && |
| 281 | res && (res != -ETIME)) { |
| 282 | amdgpu_device_halt(adev); |
| 283 | WARN_ON(1); |
| 284 | } |
| 285 | |
| 286 | return res; |
| 287 | } |
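As a hedged illustration only (this caller is not part of smu_cmn.c, and the message index is a placeholder): the split API above pairs a fire-and-forget send with a later smu_cmn_wait_for_response(), for cases where the status cannot be collected immediately.

	/* Hypothetical caller: fire a message, let the firmware act,
	 * then collect the status separately.
	 */
	static int example_send_then_wait(struct smu_context *smu,
					  uint16_t msg_index)
	{
		int ret;

		ret = smu_cmn_send_msg_without_waiting(smu, msg_index, 0);
		if (ret)
			return ret;

		/* ... firmware processes the message ... */

		return smu_cmn_wait_for_response(smu);
	}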
| 288 | |
| 289 | /** |
| 290 | * smu_cmn_wait_for_response -- wait for response from the SMU |
| 291 | * @smu: pointer to an SMU context |
| 292 | * |
| 293 | * Wait for status from the SMU. |
| 294 | * |
| 295 | * Return 0 on success, -errno on error, indicating the execution |
| 296 | * status and result of the message being waited for. See |
| 297 | * __smu_cmn_reg2errno() for details of the -errno. |
| 298 | */ |
| 299 | int smu_cmn_wait_for_response(struct smu_context *smu) |
| 300 | { |
| 301 | u32 reg; |
| 302 | int res; |
| 303 | |
| 304 | reg = __smu_cmn_poll_stat(smu); |
| 305 | res = __smu_cmn_reg2errno(smu, reg); |
| 306 | |
| 307 | if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && |
| 308 | res && (res != -ETIME)) { |
| 309 | amdgpu_device_halt(smu->adev); |
| 310 | WARN_ON(1); |
| 311 | } |
| 312 | |
| 313 | return res; |
| 314 | } |
| 315 | |
| 316 | /** |
| 317 | * smu_cmn_send_smc_msg_with_param -- send a message with parameter |
| 318 | * @smu: pointer to an SMU context |
| 319 | * @msg: message to send |
| 320 | * @param: parameter to send to the SMU |
| 321 | * @read_arg: pointer to u32 to return a value from the SMU back |
| 322 | * to the caller |
| 323 | * |
| 324 | * Send the message @msg with parameter @param to the SMU, wait for |
| 325 | * completion of the command, and return back a value from the SMU in |
| 326 | * @read_arg pointer. |
| 327 | * |
| 328 | * Return 0 on success, -errno when a problem is encountered sending |
| 329 | * message or receiving reply. If there is a PCI bus recovery or |
| 330 | * the destination is a virtual GPU which does not allow this message |
| 331 | * type, the message is simply dropped and success is also returned. |
| 332 | * See __smu_cmn_reg2errno() for details of the -errno. |
| 333 | * |
| 334 | * If we weren't able to send the message to the SMU, we also print |
| 335 | * the error to the standard log. |
| 336 | * |
| 337 | * Command completion status is printed only if the -errno is |
| 338 | * -EREMOTEIO, indicating that the SMU returned back an |
| 339 | * undefined/unknown/unspecified result. All other cases are |
| 340 | * well-defined, not printed, but instead given back to the client to |
| 341 | * decide what further to do. |
| 342 | * |
| 343 | * The return value, @read_arg is read back regardless, to give back |
| 344 | * more information to the client, which on error would most likely be |
| 345 | * @param, but we can't assume that. This also eliminates more |
| 346 | * conditionals. |
| 347 | */ |
| 348 | int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, |
| 349 | enum smu_message_type msg, |
| 350 | uint32_t param, |
| 351 | uint32_t *read_arg) |
| 352 | { |
| 353 | struct amdgpu_device *adev = smu->adev; |
| 354 | int res, index; |
| 355 | u32 reg; |
| 356 | |
| 357 | if (adev->no_hw_access) |
| 358 | return 0; |
| 359 | |
| 360 | index = smu_cmn_to_asic_specific_index(smu, |
| 361 | CMN2ASIC_MAPPING_MSG, |
| 362 | msg); |
| 363 | if (index < 0) |
| 364 | return index == -EACCES ? 0 : index; |
| 365 | |
| 366 | mutex_lock(&smu->message_lock); |
| 367 | reg = __smu_cmn_poll_stat(smu); |
| 368 | res = __smu_cmn_reg2errno(smu, reg); |
| 369 | if (reg == SMU_RESP_NONE || |
| 370 | res == -EREMOTEIO) { |
| 371 | __smu_cmn_reg_print_error(smu, reg, index, param, msg); |
| 372 | goto Out; |
| 373 | } |
| 374 | __smu_cmn_send_msg(smu, (uint16_t) index, param); |
| 375 | reg = __smu_cmn_poll_stat(smu); |
| 376 | res = __smu_cmn_reg2errno(smu, reg); |
| 377 | if (res != 0) |
| 378 | __smu_cmn_reg_print_error(smu, reg, index, param, msg); |
| 379 | if (read_arg) |
| 380 | smu_cmn_read_arg(smu, read_arg); |
| 381 | Out: |
| 382 | if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) { |
| 383 | amdgpu_device_halt(adev); |
| 384 | WARN_ON(1); |
| 385 | } |
| 386 | |
| 387 | mutex_unlock(&smu->message_lock); |
| 388 | return res; |
| 389 | } |
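A minimal usage sketch (illustrative, not from this file; SMU_MSG_GetPptLimit is one of the message types named above, and interpreting its returned argument as the power limit is an assumption for the example):

	/* Hypothetical helper: read a value back through @read_arg. */
	static int example_read_ppt_limit(struct smu_context *smu,
					  uint32_t *limit)
	{
		return smu_cmn_send_smc_msg_with_param(smu,
						       SMU_MSG_GetPptLimit,
						       0, limit);
	}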
| 390 | |
| 391 | int smu_cmn_send_smc_msg(struct smu_context *smu, |
| 392 | enum smu_message_type msg, |
| 393 | uint32_t *read_arg) |
| 394 | { |
| 395 | return smu_cmn_send_smc_msg_with_param(smu, |
| 396 | msg, |
| 397 | 0, |
| 398 | read_arg); |
| 399 | } |
| 400 | |
| 401 | int smu_cmn_send_debug_smc_msg(struct smu_context *smu, |
| 402 | uint32_t msg) |
| 403 | { |
| 404 | return __smu_cmn_send_debug_msg(smu, msg, 0); |
| 405 | } |
| 406 | |
| 407 | int smu_cmn_to_asic_specific_index(struct smu_context *smu, |
| 408 | enum smu_cmn2asic_mapping_type type, |
| 409 | uint32_t index) |
| 410 | { |
| 411 | struct cmn2asic_msg_mapping msg_mapping; |
| 412 | struct cmn2asic_mapping mapping; |
| 413 | |
| 414 | switch (type) { |
| 415 | case CMN2ASIC_MAPPING_MSG: |
| 416 | if (index >= SMU_MSG_MAX_COUNT || |
| 417 | !smu->message_map) |
| 418 | return -EINVAL; |
| 419 | |
| 420 | msg_mapping = smu->message_map[index]; |
| 421 | if (!msg_mapping.valid_mapping) |
| 422 | return -EINVAL; |
| 423 | |
| 424 | if (amdgpu_sriov_vf(smu->adev) && |
| 425 | !msg_mapping.valid_in_vf) |
| 426 | return -EACCES; |
| 427 | |
| 428 | return msg_mapping.map_to; |
| 429 | |
| 430 | case CMN2ASIC_MAPPING_CLK: |
| 431 | if (index >= SMU_CLK_COUNT || |
| 432 | !smu->clock_map) |
| 433 | return -EINVAL; |
| 434 | |
| 435 | mapping = smu->clock_map[index]; |
| 436 | if (!mapping.valid_mapping) |
| 437 | return -EINVAL; |
| 438 | |
| 439 | return mapping.map_to; |
| 440 | |
| 441 | case CMN2ASIC_MAPPING_FEATURE: |
| 442 | if (index >= SMU_FEATURE_COUNT || |
| 443 | !smu->feature_map) |
| 444 | return -EINVAL; |
| 445 | |
| 446 | mapping = smu->feature_map[index]; |
| 447 | if (!mapping.valid_mapping) |
| 448 | return -EINVAL; |
| 449 | |
| 450 | return mapping.map_to; |
| 451 | |
| 452 | case CMN2ASIC_MAPPING_TABLE: |
| 453 | if (index >= SMU_TABLE_COUNT || |
| 454 | !smu->table_map) |
| 455 | return -EINVAL; |
| 456 | |
| 457 | mapping = smu->table_map[index]; |
| 458 | if (!mapping.valid_mapping) |
| 459 | return -EINVAL; |
| 460 | |
| 461 | return mapping.map_to; |
| 462 | |
| 463 | case CMN2ASIC_MAPPING_PWR: |
| 464 | if (index >= SMU_POWER_SOURCE_COUNT || |
| 465 | !smu->pwr_src_map) |
| 466 | return -EINVAL; |
| 467 | |
| 468 | mapping = smu->pwr_src_map[index]; |
| 469 | if (!mapping.valid_mapping) |
| 470 | return -EINVAL; |
| 471 | |
| 472 | return mapping.map_to; |
| 473 | |
| 474 | case CMN2ASIC_MAPPING_WORKLOAD: |
| 475 | if (index > PP_SMC_POWER_PROFILE_WINDOW3D || |
| 476 | !smu->workload_map) |
| 477 | return -EINVAL; |
| 478 | |
| 479 | mapping = smu->workload_map[index]; |
| 480 | if (!mapping.valid_mapping) |
| 481 | return -EINVAL; |
| 482 | |
| 483 | return mapping.map_to; |
| 484 | |
| 485 | default: |
| 486 | return -EINVAL; |
| 487 | } |
| 488 | } |
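For context, a hedged sketch of what the CMN2ASIC_MAPPING_MSG lookup consumes (illustrative only; real per-ASIC drivers populate smu->message_map with their MSG_MAP()-style initializers, and PPSMC_MSG_TestMessage stands in for an ASIC-specific opcode):

	static struct cmn2asic_msg_mapping example_message_map[SMU_MSG_MAX_COUNT] = {
		/* fields: valid_mapping, map_to (ASIC opcode), valid_in_vf */
		[SMU_MSG_TestMessage] = { 1, PPSMC_MSG_TestMessage, 1 },
	};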
| 489 | |
| 490 | int smu_cmn_feature_is_supported(struct smu_context *smu, |
| 491 | enum smu_feature_mask mask) |
| 492 | { |
| 493 | struct smu_feature *feature = &smu->smu_feature; |
| 494 | int feature_id; |
| 495 | |
| 496 | feature_id = smu_cmn_to_asic_specific_index(smu, |
| 497 | CMN2ASIC_MAPPING_FEATURE, |
| 498 | mask); |
| 499 | if (feature_id < 0) |
| 500 | return 0; |
| 501 | |
| 502 | WARN_ON(feature_id > feature->feature_num); |
| 503 | |
| 504 | return test_bit(feature_id, feature->supported); |
| 505 | } |
| 506 | |
| 507 | static int __smu_get_enabled_features(struct smu_context *smu, |
| 508 | uint64_t *enabled_features) |
| 509 | { |
| 510 | return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features); |
| 511 | } |
| 512 | |
| 513 | int smu_cmn_feature_is_enabled(struct smu_context *smu, |
| 514 | enum smu_feature_mask mask) |
| 515 | { |
| 516 | struct amdgpu_device *adev = smu->adev; |
Value stored to 'adev' during its initialization is never read | |
| 517 | uint64_t enabled_features; |
| 518 | int feature_id; |
| 519 | |
| 520 | if (__smu_get_enabled_features(smu, &enabled_features)) { |
| 521 | dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n"); |
| 522 | return 0; |
| 523 | } |
| 524 | |
| 525 | /* |
| 526 | * For Renoir and Cyan Skillfish, they are assumed to have all features |
| 527 | * enabled. Also considering they have no feature_map available, the |
| 528 | * check here can avoid unwanted feature_map check below. |
| 529 | */ |
| 530 | if (enabled_features == ULLONG_MAX) |
| 531 | return 1; |
| 532 | |
| 533 | feature_id = smu_cmn_to_asic_specific_index(smu, |
| 534 | CMN2ASIC_MAPPING_FEATURE, |
| 535 | mask); |
| 536 | if (feature_id < 0) |
| 537 | return 0; |
| 538 | |
| 539 | return test_bit(feature_id, (unsigned long *)&enabled_features); |
| 540 | } |
| 541 | |
| 542 | bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu, |
| 543 | enum smu_clk_type clk_type) |
| 544 | { |
| 545 | enum smu_feature_mask feature_id = 0; |
| 546 | |
| 547 | switch (clk_type) { |
| 548 | case SMU_MCLK: |
| 549 | case SMU_UCLK: |
| 550 | feature_id = SMU_FEATURE_DPM_UCLK_BIT; |
| 551 | break; |
| 552 | case SMU_GFXCLK: |
| 553 | case SMU_SCLK: |
| 554 | feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; |
| 555 | break; |
| 556 | case SMU_SOCCLK: |
| 557 | feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; |
| 558 | break; |
| 559 | case SMU_VCLK: |
| 560 | case SMU_VCLK1: |
| 561 | feature_id = SMU_FEATURE_DPM_VCLK_BIT; |
| 562 | break; |
| 563 | case SMU_DCLK: |
| 564 | case SMU_DCLK1: |
| 565 | feature_id = SMU_FEATURE_DPM_DCLK_BIT; |
| 566 | break; |
| 567 | case SMU_FCLK: |
| 568 | feature_id = SMU_FEATURE_DPM_FCLK_BIT; |
| 569 | break; |
| 570 | default: |
| 571 | return true; |
| 572 | } |
| 573 | |
| 574 | if (!smu_cmn_feature_is_enabled(smu, feature_id)) |
| 575 | return false; |
| 576 | |
| 577 | return true; |
| 578 | } |
| 579 | |
| 580 | int smu_cmn_get_enabled_mask(struct smu_context *smu, |
| 581 | uint64_t *feature_mask) |
| 582 | { |
| 583 | uint32_t *feature_mask_high; |
| 584 | uint32_t *feature_mask_low; |
| 585 | int ret = 0, index = 0; |
| 586 | |
| 587 | if (!feature_mask) |
| 588 | return -EINVAL; |
| 589 | |
| 590 | feature_mask_low = &((uint32_t *)feature_mask)[0]; |
| 591 | feature_mask_high = &((uint32_t *)feature_mask)[1]; |
| 592 | |
| 593 | index = smu_cmn_to_asic_specific_index(smu, |
| 594 | CMN2ASIC_MAPPING_MSG, |
| 595 | SMU_MSG_GetEnabledSmuFeatures); |
| 596 | if (index > 0) { |
| 597 | ret = smu_cmn_send_smc_msg_with_param(smu, |
| 598 | SMU_MSG_GetEnabledSmuFeatures, |
| 599 | 0, |
| 600 | feature_mask_low); |
| 601 | if (ret) |
| 602 | return ret; |
| 603 | |
| 604 | ret = smu_cmn_send_smc_msg_with_param(smu, |
| 605 | SMU_MSG_GetEnabledSmuFeatures, |
| 606 | 1, |
| 607 | feature_mask_high); |
| 608 | } else { |
| 609 | ret = smu_cmn_send_smc_msg(smu, |
| 610 | SMU_MSG_GetEnabledSmuFeaturesHigh, |
| 611 | feature_mask_high); |
| 612 | if (ret) |
| 613 | return ret; |
| 614 | |
| 615 | ret = smu_cmn_send_smc_msg(smu, |
| 616 | SMU_MSG_GetEnabledSmuFeaturesLow, |
| 617 | feature_mask_low); |
| 618 | } |
| 619 | |
| 620 | return ret; |
| 621 | } |
| 622 | |
| 623 | uint64_t smu_cmn_get_indep_throttler_status( |
| 624 | const unsigned long dep_status, |
| 625 | const uint8_t *throttler_map) |
| 626 | { |
| 627 | uint64_t indep_status = 0; |
| 628 | uint8_t dep_bit = 0; |
| 629 | |
| 630 | for_each_set_bit(dep_bit, (unsigned long *)&dep_status, 32) |
| 631 | indep_status |= 1ULL << throttler_map[dep_bit]; |
| 632 | |
| 633 | return indep_status; |
| 634 | } |
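A worked example with hypothetical map values (not part of this file): each firmware-dependent throttler bit is translated to the driver's ASIC-independent bit position.

	static uint64_t example_remap(void)
	{
		/* Hypothetical map: dependent bit 0 -> 5, dependent bit 1 -> 9. */
		static const uint8_t example_map[32] = { [0] = 5, [1] = 9 };

		/* 0x3 sets dependent bits 0 and 1, so the result is
		 * (1ULL << 5) | (1ULL << 9) == 0x220.
		 */
		return smu_cmn_get_indep_throttler_status(0x3, example_map);
	}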
| 635 | |
| 636 | int smu_cmn_feature_update_enable_state(struct smu_context *smu, |
| 637 | uint64_t feature_mask, |
| 638 | bool enabled) |
| 639 | { |
| 640 | int ret = 0; |
| 641 | |
| 642 | if (enabled) { |
| 643 | ret = smu_cmn_send_smc_msg_with_param(smu, |
| 644 | SMU_MSG_EnableSmuFeaturesLow, |
| 645 | lower_32_bits(feature_mask), |
| 646 | NULL); |
| 647 | if (ret) |
| 648 | return ret; |
| 649 | ret = smu_cmn_send_smc_msg_with_param(smu, |
| 650 | SMU_MSG_EnableSmuFeaturesHigh, |
| 651 | upper_32_bits(feature_mask), |
| 652 | NULL); |
| 653 | } else { |
| 654 | ret = smu_cmn_send_smc_msg_with_param(smu, |
| 655 | SMU_MSG_DisableSmuFeaturesLow, |
| 656 | lower_32_bits(feature_mask), |
| 657 | NULL); |
| 658 | if (ret) |
| 659 | return ret; |
| 660 | ret = smu_cmn_send_smc_msg_with_param(smu, |
| 661 | SMU_MSG_DisableSmuFeaturesHigh, |
| 662 | upper_32_bits(feature_mask), |
| 663 | NULL); |
| 664 | } |
| 665 | |
| 666 | return ret; |
| 667 | } |
| 668 | |
| 669 | int smu_cmn_feature_set_enabled(struct smu_context *smu, |
| 670 | enum smu_feature_mask mask, |
| 671 | bool enable) |
| 672 | { |
| 673 | int feature_id; |
| 674 | |
| 675 | feature_id = smu_cmn_to_asic_specific_index(smu, |
| 676 | CMN2ASIC_MAPPING_FEATURE, |
| 677 | mask); |
| 678 | if (feature_id < 0) |
| 679 | return -EINVAL; |
| 680 | |
| 681 | return smu_cmn_feature_update_enable_state(smu, |
| 682 | 1ULL << feature_id, |
| 683 | enable); |
| 684 | } |
| 685 | |
| 686 | #undef __SMU_DUMMY_MAP |
| 687 | #define __SMU_DUMMY_MAP(fea)"fea" #fea |
| 688 | static const char* __smu_feature_names[] = { |
| 689 | SMU_FEATURE_MASKS"DPM_PREFETCHER", "DPM_GFXCLK", "DPM_UCLK", "DPM_SOCCLK", "DPM_UVD" , "DPM_VCE", "DPM_LCLK", "ULV", "DPM_MP0CLK", "DPM_LINK", "DPM_DCEFCLK" , "DPM_XGMI", "DS_GFXCLK", "DS_SOCCLK", "DS_LCLK", "PPT", "TDC" , "THERMAL", "GFX_PER_CU_CG", "DATA_CALCULATIONS", "RM", "DS_DCEFCLK" , "ACDC", "VR0HOT", "VR1HOT", "FW_CTF", "LED_DISPLAY", "FAN_CONTROL" , "GFX_EDC", "GFXOFF", "CG", "DPM_FCLK", "DS_FCLK", "DS_MP1CLK" , "DS_MP0CLK", "XGMI_PER_LINK_PWR_DWN", "DPM_GFX_PACE", "MEM_VDDCI_SCALING" , "MEM_MVDD_SCALING", "DS_UCLK", "GFX_ULV", "FW_DSTATE", "BACO" , "VCN_PG", "MM_DPM_PG", "JPEG_PG", "USB_PG", "RSMU_SMN_CG", "APCC_PLUS" , "GTHR", "GFX_DCS", "GFX_SS", "OUT_OF_BAND_MONITOR", "TEMP_DEPENDENT_VMIN" , "MMHUB_PG", "ATHUB_PG", "APCC_DFLL", "DF_CSTATE", "DPM_GFX_GPO" , "WAFL_CG", "CCLK_DPM", "FAN_CONTROLLER", "VCN_DPM", "LCLK_DPM" , "SHUBCLK_DPM", "DCFCLK_DPM", "DS_DCFCLK", "S0I2", "SMU_LOW_POWER" , "GFX_DEM", "PSI", "PROCHOT", "CPUOFF", "STAPM", "S0I3", "DF_CSTATES" , "PERF_LIMIT", "CORE_DLDO", "RSMU_LOW_POWER", "SMN_LOW_POWER" , "THM_LOW_POWER", "SMUIO_LOW_POWER", "MP1_LOW_POWER", "DS_VCN" , "CPPC", "OS_CSTATES", "ISP_DPM", "A55_DPM", "CVIP_DSP_DPM", "MSMU_LOW_POWER", "FUSE_CG", "MP1_CG", "SMUIO_CG", "THM_CG", "CLK_CG", "DATA_CALCULATION", "DPM_VCLK", "DPM_DCLK", "FW_DATA_READ" , "DPM_GFX_POWER_OPTIMIZER", "DPM_DCN", "VMEMP_SCALING", "VDDIO_MEM_SCALING" , "MM_DPM", "SOC_MPCLK_DS", "BACO_MPCLK_DS", "THROTTLERS", "SMARTSHIFT" , "GFX_READ_MARGIN", "GFX_IMU", "GFX_PCC_DFLL", "BOOT_TIME_CAL" , "BOOT_POWER_OPT", "GFXCLK_SPREAD_SPECTRUM", "SOC_PCC", "OPTIMIZED_VMIN" , "CLOCK_POWER_DOWN_BYPASS", "MEM_TEMP_READ", "ATHUB_MMHUB_PG" , "BACO_CG", "SOC_CG", |
| 690 | }; |
| 691 | |
| 692 | static const char *smu_get_feature_name(struct smu_context *smu, |
| 693 | enum smu_feature_mask feature) |
| 694 | { |
| 695 | if (feature < 0 || feature >= SMU_FEATURE_COUNT) |
| 696 | return "unknown smu feature"; |
| 697 | return __smu_feature_names[feature]; |
| 698 | } |
| 699 | |
| 700 | size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu, |
| 701 | char *buf) |
| 702 | { |
| 703 | int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)]; |
| 704 | uint64_t feature_mask; |
| 705 | int i, feature_index; |
| 706 | uint32_t count = 0; |
| 707 | size_t size = 0; |
| 708 | |
| 709 | if (__smu_get_enabled_features(smu, &feature_mask)) |
| 710 | return 0; |
| 711 | |
| 712 | size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n", |
| 713 | upper_32_bits(feature_mask), lower_32_bits(feature_mask)); |
| 714 | |
| 715 | memset(sort_feature, -1, sizeof(sort_feature)); |
| 716 | |
| 717 | for (i = 0; i < SMU_FEATURE_COUNT; i++) { |
| 718 | feature_index = smu_cmn_to_asic_specific_index(smu, |
| 719 | CMN2ASIC_MAPPING_FEATURE, |
| 720 | i); |
| 721 | if (feature_index < 0) |
| 722 | continue; |
| 723 | |
| 724 | sort_feature[feature_index] = i; |
| 725 | } |
| 726 | |
| 727 | size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n", |
| 728 | "No", "Feature", "Bit", "State"); |
| 729 | |
| 730 | for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) { |
| 731 | if (sort_feature[feature_index] < 0) |
| 732 | continue; |
| 733 | |
| 734 | size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n", |
| 735 | count++, |
| 736 | smu_get_feature_name(smu, sort_feature[feature_index]), |
| 737 | feature_index, |
| 738 | !!test_bit(feature_index, (unsigned long *)&feature_mask) ? |
| 739 | "enabled" : "disabled"); |
| 740 | } |
| 741 | |
| 742 | return size; |
| 743 | } |
| 744 | |
| 745 | int smu_cmn_set_pp_feature_mask(struct smu_context *smu, |
| 746 | uint64_t new_mask) |
| 747 | { |
| 748 | int ret = 0; |
| 749 | uint64_t feature_mask; |
| 750 | uint64_t feature_2_enabled = 0; |
| 751 | uint64_t feature_2_disabled = 0; |
| 752 | |
| 753 | ret = __smu_get_enabled_features(smu, &feature_mask); |
| 754 | if (ret) |
| 755 | return ret; |
| 756 | |
| 757 | feature_2_enabled = ~feature_mask & new_mask; |
| 758 | feature_2_disabled = feature_mask & ~new_mask; |
| 759 | |
| 760 | if (feature_2_enabled) { |
| 761 | ret = smu_cmn_feature_update_enable_state(smu, |
| 762 | feature_2_enabled, |
| 763 | true); |
| 764 | if (ret) |
| 765 | return ret; |
| 766 | } |
| 767 | if (feature_2_disabled) { |
| 768 | ret = smu_cmn_feature_update_enable_state(smu, |
| 769 | feature_2_disabled, |
| 770 | false); |
| 771 | if (ret) |
| 772 | return ret; |
| 773 | } |
| 774 | |
| 775 | return ret; |
| 776 | } |
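For example (illustrative bit values): with feature_mask == 0b1100 currently enabled and new_mask == 0b1010 requested, feature_2_enabled == ~0b1100 & 0b1010 == 0b0010 and feature_2_disabled == 0b1100 & ~0b1010 == 0b0100, so only bit 1 is switched on and only bit 2 is switched off.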
| 777 | |
| 778 | /** |
| 779 | * smu_cmn_disable_all_features_with_exception - disable all dpm features |
| 780 | * except this specified by |
| 781 | * @mask |
| 782 | * |
| 783 | * @smu: smu_context pointer |
| 784 | * @mask: the dpm feature which should not be disabled |
| 785 | * SMU_FEATURE_COUNT: no exception, all dpm features |
| 786 | * to disable |
| 787 | * |
| 788 | * Returns: |
| 789 | * 0 on success or a negative error code on failure. |
| 790 | */ |
| 791 | int smu_cmn_disable_all_features_with_exception(struct smu_context *smu, |
| 792 | enum smu_feature_mask mask) |
| 793 | { |
| 794 | uint64_t features_to_disable = U64_MAX; |
| 795 | int skipped_feature_id; |
| 796 | |
| 797 | if (mask != SMU_FEATURE_COUNT) { |
| 798 | skipped_feature_id = smu_cmn_to_asic_specific_index(smu, |
| 799 | CMN2ASIC_MAPPING_FEATURE, |
| 800 | mask); |
| 801 | if (skipped_feature_id < 0) |
| 802 | return -EINVAL; |
| 803 | |
| 804 | features_to_disable &= ~(1ULL << skipped_feature_id); |
| 805 | } |
| 806 | |
| 807 | return smu_cmn_feature_update_enable_state(smu, |
| 808 | features_to_disable, |
| 809 | 0); |
| 810 | } |
| 811 | |
| 812 | int smu_cmn_get_smc_version(struct smu_context *smu, |
| 813 | uint32_t *if_version, |
| 814 | uint32_t *smu_version) |
| 815 | { |
| 816 | int ret = 0; |
| 817 | |
| 818 | if (!if_version && !smu_version) |
| 819 | return -EINVAL; |
| 820 | |
| 821 | if (smu->smc_fw_if_version && smu->smc_fw_version) |
| 822 | { |
| 823 | if (if_version) |
| 824 | *if_version = smu->smc_fw_if_version; |
| 825 | |
| 826 | if (smu_version) |
| 827 | *smu_version = smu->smc_fw_version; |
| 828 | |
| 829 | return 0; |
| 830 | } |
| 831 | |
| 832 | if (if_version) { |
| 833 | ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version); |
| 834 | if (ret) |
| 835 | return ret; |
| 836 | |
| 837 | smu->smc_fw_if_version = *if_version; |
| 838 | } |
| 839 | |
| 840 | if (smu_version) { |
| 841 | ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version); |
| 842 | if (ret) |
| 843 | return ret; |
| 844 | |
| 845 | smu->smc_fw_version = *smu_version; |
| 846 | } |
| 847 | |
| 848 | return ret; |
| 849 | } |
| 850 | |
| 851 | int smu_cmn_update_table(struct smu_context *smu, |
| 852 | enum smu_table_id table_index, |
| 853 | int argument, |
| 854 | void *table_data, |
| 855 | bool drv2smu) |
| 856 | { |
| 857 | struct smu_table_context *smu_table = &smu->smu_table; |
| 858 | struct amdgpu_device *adev = smu->adev; |
| 859 | struct smu_table *table = &smu_table->driver_table; |
| 860 | int table_id = smu_cmn_to_asic_specific_index(smu, |
| 861 | CMN2ASIC_MAPPING_TABLE, |
| 862 | table_index); |
| 863 | uint32_t table_size; |
| 864 | int ret = 0; |
| 865 | if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) |
| 866 | return -EINVAL; |
| 867 | |
| 868 | table_size = smu_table->tables[table_index].size; |
| 869 | |
| 870 | if (drv2smu) { |
| 871 | memcpy(table->cpu_addr, table_data, table_size); |
| 872 | /* |
| 873 | * Flush the hdp cache: to make sure the content seen |
| 874 | * by the GPU is consistent with the CPU. |
| 875 | */ |
| 876 | amdgpu_asic_flush_hdp(adev, NULL); |
| 877 | } |
| 878 | |
| 879 | ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ? |
| 880 | SMU_MSG_TransferTableDram2Smu : |
| 881 | SMU_MSG_TransferTableSmu2Dram, |
| 882 | table_id | ((argument & 0xFFFF) << 16), |
| 883 | NULL); |
| 884 | if (ret) |
| 885 | return ret; |
| 886 | |
| 887 | if (!drv2smu) { |
| 888 | amdgpu_asic_invalidate_hdp(adev, NULL); |
| 889 | memcpy(table_data, table->cpu_addr, table_size); |
| 890 | } |
| 891 | |
| 892 | return 0; |
| 893 | } |
| 894 | |
| 895 | int smu_cmn_write_watermarks_table(struct smu_context *smu) |
| 896 | { |
| 897 | void *watermarks_table = smu->smu_table.watermarks_table; |
| 898 | |
| 899 | if (!watermarks_table) |
| 900 | return -EINVAL; |
| 901 | |
| 902 | return smu_cmn_update_table(smu, |
| 903 | SMU_TABLE_WATERMARKS, |
| 904 | 0, |
| 905 | watermarks_table, |
| 906 | true); |
| 907 | } |
| 908 | |
| 909 | int smu_cmn_write_pptable(struct smu_context *smu) |
| 910 | { |
| 911 | void *pptable = smu->smu_table.driver_pptable; |
| 912 | |
| 913 | return smu_cmn_update_table(smu, |
| 914 | SMU_TABLE_PPTABLE, |
| 915 | 0, |
| 916 | pptable, |
| 917 | true); |
| 918 | } |
| 919 | |
| 920 | int smu_cmn_get_metrics_table(struct smu_context *smu, |
| 921 | void *metrics_table, |
| 922 | bool bypass_cache) |
| 923 | { |
| 924 | struct smu_table_context *smu_table = &smu->smu_table; |
| 925 | uint32_t table_size = |
| 926 | smu_table->tables[SMU_TABLE_SMU_METRICS].size; |
| 927 | int ret = 0; |
| 928 | |
| 929 | if (bypass_cache || |
| 930 | !smu_table->metrics_time || |
| 931 | time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) { |
| 932 | ret = smu_cmn_update_table(smu, |
| 933 | SMU_TABLE_SMU_METRICS, |
| 934 | 0, |
| 935 | smu_table->metrics_table, |
| 936 | false); |
| 937 | if (ret) { |
| 938 | dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n"); |
| 939 | return ret; |
| 940 | } |
| 941 | smu_table->metrics_time = jiffies; |
| 942 | } |
| 943 | |
| 944 | if (metrics_table) |
| 945 | memcpy(metrics_table, smu_table->metrics_table, table_size); |
| 946 | |
| 947 | return 0; |
| 948 | } |
| 949 | |
| 950 | int smu_cmn_get_combo_pptable(struct smu_context *smu) |
| 951 | { |
| 952 | void *pptable = smu->smu_table.combo_pptable; |
| 953 | |
| 954 | return smu_cmn_update_table(smu, |
| 955 | SMU_TABLE_COMBO_PPTABLE, |
| 956 | 0, |
| 957 | pptable, |
| 958 | false); |
| 959 | } |
| 960 | |
| 961 | void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) |
| 962 | { |
| 963 | struct metrics_table_header *header = (struct metrics_table_header *)table; |
| 964 | uint16_t structure_size; |
| 965 | |
| 966 | #define METRICS_VERSION(a, b) ((a << 16) | b ) |
| 967 | |
| 968 | switch (METRICS_VERSION(frev, crev)) { |
| 969 | case METRICS_VERSION(1, 0): |
| 970 | structure_size = sizeof(struct gpu_metrics_v1_0); |
| 971 | break; |
| 972 | case METRICS_VERSION(1, 1): |
| 973 | structure_size = sizeof(struct gpu_metrics_v1_1); |
| 974 | break; |
| 975 | case METRICS_VERSION(1, 2): |
| 976 | structure_size = sizeof(struct gpu_metrics_v1_2); |
| 977 | break; |
| 978 | case METRICS_VERSION(1, 3): |
| 979 | structure_size = sizeof(struct gpu_metrics_v1_3); |
| 980 | break; |
| 981 | case METRICS_VERSION(2, 0): |
| 982 | structure_size = sizeof(struct gpu_metrics_v2_0); |
| 983 | break; |
| 984 | case METRICS_VERSION(2, 1): |
| 985 | structure_size = sizeof(struct gpu_metrics_v2_1); |
| 986 | break; |
| 987 | case METRICS_VERSION(2, 2): |
| 988 | structure_size = sizeof(struct gpu_metrics_v2_2); |
| 989 | break; |
| 990 | case METRICS_VERSION(2, 3): |
| 991 | structure_size = sizeof(struct gpu_metrics_v2_3); |
| 992 | break; |
| 993 | default: |
| 994 | return; |
| 995 | } |
| 996 | |
| 997 | #undef METRICS_VERSION |
| 998 | |
| 999 | memset(header, 0xFF, structure_size); |
| 1000 | |
| 1001 | header->format_revision = frev; |
| 1002 | header->content_revision = crev; |
| 1003 | header->structure_size = structure_size; |
| 1004 | |
| 1005 | } |
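A hedged usage sketch (not from this file): a per-ASIC ->get_gpu_metrics implementation stamps its table with the matching format/content revision before filling it, e.g. for a v2.2 layout:

	static void example_stamp_metrics(struct smu_context *smu)
	{
		struct gpu_metrics_v2_2 *gpu_metrics =
			smu->smu_table.gpu_metrics_table;

		/* Writes frev/crev and the structure size into the header. */
		smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
		/* ... fill the individual metric fields, then return the table ... */
	}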
| 1006 | |
| 1007 | int smu_cmn_set_mp1_state(struct smu_context *smu, |
| 1008 | enum pp_mp1_state mp1_state) |
| 1009 | { |
| 1010 | enum smu_message_type msg; |
| 1011 | int ret; |
| 1012 | |
| 1013 | switch (mp1_state) { |
| 1014 | case PP_MP1_STATE_SHUTDOWN: |
| 1015 | msg = SMU_MSG_PrepareMp1ForShutdown; |
| 1016 | break; |
| 1017 | case PP_MP1_STATE_UNLOAD: |
| 1018 | msg = SMU_MSG_PrepareMp1ForUnload; |
| 1019 | break; |
| 1020 | case PP_MP1_STATE_RESET: |
| 1021 | msg = SMU_MSG_PrepareMp1ForReset; |
| 1022 | break; |
| 1023 | case PP_MP1_STATE_NONE: |
| 1024 | default: |
| 1025 | return 0; |
| 1026 | } |
| 1027 | |
| 1028 | ret = smu_cmn_send_smc_msg(smu, msg, NULL); |
| 1029 | if (ret) |
| 1030 | dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n"); |
| 1031 | |
| 1032 | return ret; |
| 1033 | } |
| 1034 | |
| 1035 | bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) |
| 1036 | { |
| 1037 | STUB(); |
| 1038 | return false; |
| 1039 | #ifdef notyet |
| 1040 | struct pci_dev *p = NULL; |
| 1041 | bool snd_driver_loaded; |
| 1042 | |
| 1043 | /* |
| 1044 | * If the ASIC comes with no audio function, we always assume |
| 1045 | * it is "enabled". |
| 1046 | */ |
| 1047 | p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), |
| 1048 | adev->pdev->bus->number, 1); |
| 1049 | if (!p) |
| 1050 | return true; |
| 1051 | |
| 1052 | snd_driver_loaded = pci_is_enabled(p) ? true : false; |
| 1053 | |
| 1054 | pci_dev_put(p); |
| 1055 | |
| 1056 | return snd_driver_loaded; |
| 1057 | #endif |
| 1058 | } |