Bug Summary

File: dev/pci/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
Warning: line 1690, column 24
Value stored to 'adev' during its initialization is never read

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name aldebaran_ppt.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I 
/usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c 
/usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#define SWSMU_CODE_LAYER_L2
25
26#include <linux/firmware.h>
27#include "amdgpu.h"
28#include "amdgpu_dpm.h"
29#include "amdgpu_smu.h"
30#include "atomfirmware.h"
31#include "amdgpu_atomfirmware.h"
32#include "amdgpu_atombios.h"
33#include "smu_v13_0.h"
34#include "smu13_driver_if_aldebaran.h"
35#include "soc15_common.h"
36#include "atom.h"
37#include "aldebaran_ppt.h"
38#include "smu_v13_0_pptable.h"
39#include "aldebaran_ppsmc.h"
40#include "nbio/nbio_7_4_offset.h"
41#include "nbio/nbio_7_4_sh_mask.h"
42#include "thm/thm_11_0_2_offset.h"
43#include "thm/thm_11_0_2_sh_mask.h"
44#include "amdgpu_xgmi.h"
45#include <linux/pci.h>
46#include "amdgpu_ras.h"
47#include "smu_cmn.h"
48#include "mp/mp_13_0_2_offset.h"
49
50/*
51 * DO NOT use these for err/warn/info/debug messages.
52 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
53 * They are more MGPU friendly.
54 */
55#undef pr_err
56#undef pr_warn
57#undef pr_info
58#undef pr_debug
59
60#define ALDEBARAN_FEA_MAP(smu_feature, aldebaran_feature)[smu_feature] = {1, (aldebaran_feature)} \
61 [smu_feature] = {1, (aldebaran_feature)}
62
63#define FEATURE_MASK(feature)(1ULL << feature) (1ULL << feature)
64#define SMC_DPM_FEATURE( (1ULL << 0) | (1ULL << 1) | (1ULL << 2) |
(1ULL << 3) | (1ULL << 4) | (1ULL << 5) | (
1ULL << 6) | (1ULL << 13))
( \
65 FEATURE_MASK(FEATURE_DATA_CALCULATIONS)(1ULL << 0) | \
66 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)(1ULL << 1) | \
67 FEATURE_MASK(FEATURE_DPM_UCLK_BIT)(1ULL << 2) | \
68 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)(1ULL << 3) | \
69 FEATURE_MASK(FEATURE_DPM_FCLK_BIT)(1ULL << 4) | \
70 FEATURE_MASK(FEATURE_DPM_LCLK_BIT)(1ULL << 5) | \
71 FEATURE_MASK(FEATURE_DPM_XGMI_BIT)(1ULL << 6) | \
72 FEATURE_MASK(FEATURE_DPM_VCN_BIT)(1ULL << 13))
73
74/* possible frequency drift (1Mhz) */
75#define EPSILON1 1
76
77#define smnPCIE_ESM_CTRL0x111003D0 0x111003D0
78
79/*
80 * SMU support ECCTABLE since version 68.42.0,
81 * use this to check ECCTALE feature whether support
82 */
83#define SUPPORT_ECCTABLE_SMU_VERSION0x00442a00 0x00442a00
84
85/*
86 * SMU support mca_ceumc_addr in ECCTABLE since version 68.55.0,
87 * use this to check mca_ceumc_addr record whether support
88 */
89#define SUPPORT_ECCTABLE_V2_SMU_VERSION0x00443700 0x00443700
90
91/*
92 * SMU support BAD CHENNEL info MSG since version 68.51.00,
93 * use this to check ECCTALE feature whether support
94 */
95#define SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION0x00443300 0x00443300
96
97static const struct smu_temperature_range smu13_thermal_policy[] =
98{
99 {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
100 { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
101};
102
103static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
104 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0)[SMU_MSG_TestMessage] = {1, (0x1), (0)},
105 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1)[SMU_MSG_GetSmuVersion] = {1, (0x2), (1)},
106 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1)[SMU_MSG_GetDriverIfVersion] = {1, (0x4), (1)},
107 MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0)[SMU_MSG_EnableAllSmuFeatures] = {1, (0x7), (0)},
108 MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0)[SMU_MSG_DisableAllSmuFeatures] = {1, (0x8), (0)},
109 MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1)[SMU_MSG_GetEnabledSmuFeaturesLow] = {1, (0xD), (1)},
110 MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1)[SMU_MSG_GetEnabledSmuFeaturesHigh] = {1, (0xE), (1)},
111 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1)[SMU_MSG_SetDriverDramAddrHigh] = {1, (0xF), (1)},
112 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1)[SMU_MSG_SetDriverDramAddrLow] = {1, (0x10), (1)},
113 MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0)[SMU_MSG_SetToolsDramAddrHigh] = {1, (0x11), (0)},
114 MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0)[SMU_MSG_SetToolsDramAddrLow] = {1, (0x12), (0)},
115 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1)[SMU_MSG_TransferTableSmu2Dram] = {1, (0x13), (1)},
116 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0)[SMU_MSG_TransferTableDram2Smu] = {1, (0x14), (0)},
117 MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0)[SMU_MSG_UseDefaultPPTable] = {1, (0x15), (0)},
118 MSG_MAP(SetSystemVirtualDramAddrHigh, PPSMC_MSG_SetSystemVirtualDramAddrHigh, 0)[SMU_MSG_SetSystemVirtualDramAddrHigh] = {1, (0x16), (0)},
119 MSG_MAP(SetSystemVirtualDramAddrLow, PPSMC_MSG_SetSystemVirtualDramAddrLow, 0)[SMU_MSG_SetSystemVirtualDramAddrLow] = {1, (0x17), (0)},
120 MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0)[SMU_MSG_SetSoftMinByFreq] = {1, (0x18), (0)},
121 MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0)[SMU_MSG_SetSoftMaxByFreq] = {1, (0x19), (0)},
122 MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0)[SMU_MSG_SetHardMinByFreq] = {1, (0x1A), (0)},
123 MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0)[SMU_MSG_SetHardMaxByFreq] = {1, (0x1B), (0)},
124 MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 0)[SMU_MSG_GetMinDpmFreq] = {1, (0x1C), (0)},
125 MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 0)[SMU_MSG_GetMaxDpmFreq] = {1, (0x1D), (0)},
126 MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1)[SMU_MSG_GetDpmFreqByIndex] = {1, (0x1E), (1)},
127 MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1)[SMU_MSG_SetWorkloadMask] = {1, (0x1F), (1)},
128 MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm, 0)[SMU_MSG_GetVoltageByDpm] = {1, (0x20), (0)},
129 MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0)[SMU_MSG_GetVoltageByDpmOverdrive] = {1, (0x21), (0)},
130 MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0)[SMU_MSG_SetPptLimit] = {1, (0x22), (0)},
131 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1)[SMU_MSG_GetPptLimit] = {1, (0x23), (1)},
132 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0)[SMU_MSG_PrepareMp1ForUnload] = {1, (0x24), (0)},
133 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0)[SMU_MSG_GfxDeviceDriverReset] = {1, (0x3), (0)},
134 MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0)[SMU_MSG_RunDcBtc] = {1, (0x27), (0)},
135 MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0)[SMU_MSG_DramLogSetDramAddrHigh] = {1, (0x28), (0)},
136 MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0)[SMU_MSG_DramLogSetDramAddrLow] = {1, (0x29), (0)},
137 MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0)[SMU_MSG_DramLogSetDramSize] = {1, (0x2A), (0)},
138 MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0)[SMU_MSG_GetDebugData] = {1, (0x2B), (0)},
139 MSG_MAP(WaflTest, PPSMC_MSG_WaflTest, 0)[SMU_MSG_WaflTest] = {1, (0x2C), (0)},
140 MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable, 0)[SMU_MSG_SetMemoryChannelEnable] = {1, (0x2E), (0)},
141 MSG_MAP(SetNumBadHbmPagesRetired, PPSMC_MSG_SetNumBadHbmPagesRetired, 0)[SMU_MSG_SetNumBadHbmPagesRetired] = {1, (0x2F), (0)},
142 MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 0)[SMU_MSG_DFCstateControl] = {1, (0x32), (0)},
143 MSG_MAP(GetGmiPwrDnHyst, PPSMC_MSG_GetGmiPwrDnHyst, 0)[SMU_MSG_GetGmiPwrDnHyst] = {1, (0x33), (0)},
144 MSG_MAP(SetGmiPwrDnHyst, PPSMC_MSG_SetGmiPwrDnHyst, 0)[SMU_MSG_SetGmiPwrDnHyst] = {1, (0x34), (0)},
145 MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 0)[SMU_MSG_GmiPwrDnControl] = {1, (0x35), (0)},
146 MSG_MAP(EnterGfxoff, PPSMC_MSG_EnterGfxoff, 0)[SMU_MSG_EnterGfxoff] = {1, (0x36), (0)},
147 MSG_MAP(ExitGfxoff, PPSMC_MSG_ExitGfxoff, 0)[SMU_MSG_ExitGfxoff] = {1, (0x37), (0)},
148 MSG_MAP(SetExecuteDMATest, PPSMC_MSG_SetExecuteDMATest, 0)[SMU_MSG_SetExecuteDMATest] = {1, (0x38), (0)},
149 MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0)[SMU_MSG_EnableDeterminism] = {1, (0x39), (0)},
150 MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0)[SMU_MSG_DisableDeterminism] = {1, (0x3A), (0)},
151 MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0)[SMU_MSG_SetUclkDpmMode] = {1, (0x3B), (0)},
152 MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0)[SMU_MSG_GfxDriverResetRecovery] = {1, (0x42), (0)},
153 MSG_MAP(BoardPowerCalibration, PPSMC_MSG_BoardPowerCalibration, 0)[SMU_MSG_BoardPowerCalibration] = {1, (0x43), (0)},
154 MSG_MAP(HeavySBR, PPSMC_MSG_HeavySBR, 0)[SMU_MSG_HeavySBR] = {1, (0x45), (0)},
155 MSG_MAP(SetBadHBMPagesRetiredFlagsPerChannel, PPSMC_MSG_SetBadHBMPagesRetiredFlagsPerChannel, 0)[SMU_MSG_SetBadHBMPagesRetiredFlagsPerChannel] = {1, (0x46), (
0)}
,
156};
157
158static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
159 CLK_MAP(GFXCLK, PPCLK_GFXCLK)[SMU_GFXCLK] = {1, (PPCLK_GFXCLK)},
160 CLK_MAP(SCLK, PPCLK_GFXCLK)[SMU_SCLK] = {1, (PPCLK_GFXCLK)},
161 CLK_MAP(SOCCLK, PPCLK_SOCCLK)[SMU_SOCCLK] = {1, (PPCLK_SOCCLK)},
162 CLK_MAP(FCLK, PPCLK_FCLK)[SMU_FCLK] = {1, (PPCLK_FCLK)},
163 CLK_MAP(UCLK, PPCLK_UCLK)[SMU_UCLK] = {1, (PPCLK_UCLK)},
164 CLK_MAP(MCLK, PPCLK_UCLK)[SMU_MCLK] = {1, (PPCLK_UCLK)},
165 CLK_MAP(DCLK, PPCLK_DCLK)[SMU_DCLK] = {1, (PPCLK_DCLK)},
166 CLK_MAP(VCLK, PPCLK_VCLK)[SMU_VCLK] = {1, (PPCLK_VCLK)},
167 CLK_MAP(LCLK, PPCLK_LCLK)[SMU_LCLK] = {1, (PPCLK_LCLK)},
168};
169
170static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUNT] = {
171 ALDEBARAN_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATIONS)[SMU_FEATURE_DATA_CALCULATIONS_BIT] = {1, (0)},
172 ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, FEATURE_DPM_GFXCLK_BIT)[SMU_FEATURE_DPM_GFXCLK_BIT] = {1, (1)},
173 ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, FEATURE_DPM_UCLK_BIT)[SMU_FEATURE_DPM_UCLK_BIT] = {1, (2)},
174 ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, FEATURE_DPM_SOCCLK_BIT)[SMU_FEATURE_DPM_SOCCLK_BIT] = {1, (3)},
175 ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, FEATURE_DPM_FCLK_BIT)[SMU_FEATURE_DPM_FCLK_BIT] = {1, (4)},
176 ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, FEATURE_DPM_LCLK_BIT)[SMU_FEATURE_DPM_LCLK_BIT] = {1, (5)},
177 ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, FEATURE_DPM_XGMI_BIT)[SMU_FEATURE_DPM_XGMI_BIT] = {1, (6)},
178 ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, FEATURE_DS_GFXCLK_BIT)[SMU_FEATURE_DS_GFXCLK_BIT] = {1, (7)},
179 ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, FEATURE_DS_SOCCLK_BIT)[SMU_FEATURE_DS_SOCCLK_BIT] = {1, (8)},
180 ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, FEATURE_DS_LCLK_BIT)[SMU_FEATURE_DS_LCLK_BIT] = {1, (9)},
181 ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, FEATURE_DS_FCLK_BIT)[SMU_FEATURE_DS_FCLK_BIT] = {1, (10)},
182 ALDEBARAN_FEA_MAP(SMU_FEATURE_DS_UCLK_BIT, FEATURE_DS_UCLK_BIT)[SMU_FEATURE_DS_UCLK_BIT] = {1, (11)},
183 ALDEBARAN_FEA_MAP(SMU_FEATURE_GFX_SS_BIT, FEATURE_GFX_SS_BIT)[SMU_FEATURE_GFX_SS_BIT] = {1, (12)},
184 ALDEBARAN_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, FEATURE_DPM_VCN_BIT)[SMU_FEATURE_VCN_DPM_BIT] = {1, (13)},
185 ALDEBARAN_FEA_MAP(SMU_FEATURE_RSMU_SMN_CG_BIT, FEATURE_RSMU_SMN_CG_BIT)[SMU_FEATURE_RSMU_SMN_CG_BIT] = {1, (14)},
186 ALDEBARAN_FEA_MAP(SMU_FEATURE_WAFL_CG_BIT, FEATURE_WAFL_CG_BIT)[SMU_FEATURE_WAFL_CG_BIT] = {1, (15)},
187 ALDEBARAN_FEA_MAP(SMU_FEATURE_PPT_BIT, FEATURE_PPT_BIT)[SMU_FEATURE_PPT_BIT] = {1, (16)},
188 ALDEBARAN_FEA_MAP(SMU_FEATURE_TDC_BIT, FEATURE_TDC_BIT)[SMU_FEATURE_TDC_BIT] = {1, (17)},
189 ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_PLUS_BIT, FEATURE_APCC_PLUS_BIT)[SMU_FEATURE_APCC_PLUS_BIT] = {1, (18)},
190 ALDEBARAN_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, FEATURE_APCC_DFLL_BIT)[SMU_FEATURE_APCC_DFLL_BIT] = {1, (19)},
191 ALDEBARAN_FEA_MAP(SMU_FEATURE_FUSE_CG_BIT, FEATURE_FUSE_CG_BIT)[SMU_FEATURE_FUSE_CG_BIT] = {1, (26)},
192 ALDEBARAN_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, FEATURE_MP1_CG_BIT)[SMU_FEATURE_MP1_CG_BIT] = {1, (27)},
193 ALDEBARAN_FEA_MAP(SMU_FEATURE_SMUIO_CG_BIT, FEATURE_SMUIO_CG_BIT)[SMU_FEATURE_SMUIO_CG_BIT] = {1, (28)},
194 ALDEBARAN_FEA_MAP(SMU_FEATURE_THM_CG_BIT, FEATURE_THM_CG_BIT)[SMU_FEATURE_THM_CG_BIT] = {1, (29)},
195 ALDEBARAN_FEA_MAP(SMU_FEATURE_CLK_CG_BIT, FEATURE_CLK_CG_BIT)[SMU_FEATURE_CLK_CG_BIT] = {1, (30)},
196 ALDEBARAN_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, FEATURE_FW_CTF_BIT)[SMU_FEATURE_FW_CTF_BIT] = {1, (20)},
197 ALDEBARAN_FEA_MAP(SMU_FEATURE_THERMAL_BIT, FEATURE_THERMAL_BIT)[SMU_FEATURE_THERMAL_BIT] = {1, (21)},
198 ALDEBARAN_FEA_MAP(SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, FEATURE_OUT_OF_BAND_MONITOR_BIT)[SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT] = {1, (22)},
199 ALDEBARAN_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,FEATURE_XGMI_PER_LINK_PWR_DWN)[SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT] = {1, (24)},
200 ALDEBARAN_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, FEATURE_DF_CSTATE)[SMU_FEATURE_DF_CSTATE_BIT] = {1, (25)},
201};
202
203static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = {
204 TAB_MAP(PPTABLE)[SMU_TABLE_PPTABLE] = {1, 0},
205 TAB_MAP(AVFS_PSM_DEBUG)[SMU_TABLE_AVFS_PSM_DEBUG] = {1, 1},
206 TAB_MAP(AVFS_FUSE_OVERRIDE)[SMU_TABLE_AVFS_FUSE_OVERRIDE] = {1, 2},
207 TAB_MAP(PMSTATUSLOG)[SMU_TABLE_PMSTATUSLOG] = {1, 3},
208 TAB_MAP(SMU_METRICS)[SMU_TABLE_SMU_METRICS] = {1, 4},
209 TAB_MAP(DRIVER_SMU_CONFIG)[SMU_TABLE_DRIVER_SMU_CONFIG] = {1, 5},
210 TAB_MAP(I2C_COMMANDS)[SMU_TABLE_I2C_COMMANDS] = {1, 6},
211 TAB_MAP(ECCINFO)[SMU_TABLE_ECCINFO] = {1, 7},
212};
213
214static const uint8_t aldebaran_throttler_map[] = {
215 [THROTTLER_PPT0_BIT0] = (SMU_THROTTLER_PPT0_BIT0),
216 [THROTTLER_PPT1_BIT1] = (SMU_THROTTLER_PPT1_BIT1),
217 [THROTTLER_TDC_GFX_BIT2] = (SMU_THROTTLER_TDC_GFX_BIT16),
218 [THROTTLER_TDC_SOC_BIT3] = (SMU_THROTTLER_TDC_SOC_BIT17),
219 [THROTTLER_TDC_HBM_BIT4] = (SMU_THROTTLER_TDC_MEM_BIT18),
220 [THROTTLER_TEMP_GPU_BIT6] = (SMU_THROTTLER_TEMP_GPU_BIT32),
221 [THROTTLER_TEMP_MEM_BIT7] = (SMU_THROTTLER_TEMP_MEM_BIT34),
222 [THROTTLER_TEMP_VR_GFX_BIT11] = (SMU_THROTTLER_TEMP_VR_GFX_BIT38),
223 [THROTTLER_TEMP_VR_SOC_BIT12] = (SMU_THROTTLER_TEMP_VR_SOC_BIT39),
224 [THROTTLER_TEMP_VR_MEM_BIT13] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT40),
225 [THROTTLER_APCC_BIT19] = (SMU_THROTTLER_APCC_BIT23),
226};
227
228static int aldebaran_tables_init(struct smu_context *smu)
229{
230 struct smu_table_context *smu_table = &smu->smu_table;
231 struct smu_table *tables = smu_table->tables;
232
233 SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),do { tables[SMU_TABLE_PPTABLE].size = sizeof(PPTable_t); tables
[SMU_TABLE_PPTABLE].align = (1 << 12); tables[SMU_TABLE_PPTABLE
].domain = 0x4; } while (0)
234 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_PPTABLE].size = sizeof(PPTable_t); tables
[SMU_TABLE_PPTABLE].align = (1 << 12); tables[SMU_TABLE_PPTABLE
].domain = 0x4; } while (0)
;
235
236 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,do { tables[SMU_TABLE_PMSTATUSLOG].size = 0x19000; tables[SMU_TABLE_PMSTATUSLOG
].align = (1 << 12); tables[SMU_TABLE_PMSTATUSLOG].domain
= 0x4; } while (0)
237 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_PMSTATUSLOG].size = 0x19000; tables[SMU_TABLE_PMSTATUSLOG
].align = (1 << 12); tables[SMU_TABLE_PMSTATUSLOG].domain
= 0x4; } while (0)
;
238
239 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),do { tables[SMU_TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t
); tables[SMU_TABLE_SMU_METRICS].align = (1 << 12); tables
[SMU_TABLE_SMU_METRICS].domain = 0x4; } while (0)
240 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t
); tables[SMU_TABLE_SMU_METRICS].align = (1 << 12); tables
[SMU_TABLE_SMU_METRICS].domain = 0x4; } while (0)
;
241
242 SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),do { tables[SMU_TABLE_I2C_COMMANDS].size = sizeof(SwI2cRequest_t
); tables[SMU_TABLE_I2C_COMMANDS].align = (1 << 12); tables
[SMU_TABLE_I2C_COMMANDS].domain = 0x4; } while (0)
243 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_I2C_COMMANDS].size = sizeof(SwI2cRequest_t
); tables[SMU_TABLE_I2C_COMMANDS].align = (1 << 12); tables
[SMU_TABLE_I2C_COMMANDS].domain = 0x4; } while (0)
;
244
245 SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),do { tables[SMU_TABLE_ECCINFO].size = sizeof(EccInfoTable_t);
tables[SMU_TABLE_ECCINFO].align = (1 << 12); tables[SMU_TABLE_ECCINFO
].domain = 0x4; } while (0)
246 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM)do { tables[SMU_TABLE_ECCINFO].size = sizeof(EccInfoTable_t);
tables[SMU_TABLE_ECCINFO].align = (1 << 12); tables[SMU_TABLE_ECCINFO
].domain = 0x4; } while (0)
;
247
248 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL(0x0001 | 0x0004));
249 if (!smu_table->metrics_table)
250 return -ENOMEM12;
251 smu_table->metrics_time = 0;
252
253 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
254 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL(0x0001 | 0x0004));
255 if (!smu_table->gpu_metrics_table) {
256 kfree(smu_table->metrics_table);
257 return -ENOMEM12;
258 }
259
260 smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL(0x0001 | 0x0004));
261 if (!smu_table->ecc_table) {
262 kfree(smu_table->metrics_table);
263 kfree(smu_table->gpu_metrics_table);
264 return -ENOMEM12;
265 }
266
267 return 0;
268}
269
270static int aldebaran_allocate_dpm_context(struct smu_context *smu)
271{
272 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
273
274 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
275 GFP_KERNEL(0x0001 | 0x0004));
276 if (!smu_dpm->dpm_context)
277 return -ENOMEM12;
278 smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
279
280 return 0;
281}
282
/*
 * Full SMC table setup: table descriptors/buffers, DPM context, then the
 * common smu_v13_0 initialization.  Stops at the first failing step and
 * propagates its error code.
 */
static int aldebaran_init_smc_tables(struct smu_context *smu)
{
	int ret;

	ret = aldebaran_tables_init(smu);
	if (!ret)
		ret = aldebaran_allocate_dpm_context(smu);
	if (!ret)
		ret = smu_v13_0_init_smc_tables(smu);

	return ret;
}
297
/*
 * Report the driver-allowed feature mask.  All bits are set: the pptable
 * decides which features actually get enabled.
 *
 * @feature_mask: output array of @num 32-bit words (at most 2).
 * Returns 0 on success, -EINVAL if @num exceeds the two supported words.
 */
static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
					      uint32_t *feature_mask, uint32_t num)
{
	if (num > 2)
		return -EINVAL;

	/* pptable will handle the features to enable */
	memset(feature_mask, 0xFF, num * sizeof(*feature_mask));

	return 0;
}
309
310static int aldebaran_set_default_dpm_table(struct smu_context *smu)
311{
312 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
313 struct smu_13_0_dpm_table *dpm_table = NULL((void *)0);
314 PPTable_t *pptable = smu->smu_table.driver_pptable;
315 int ret = 0;
316
317 /* socclk dpm table setup */
318 dpm_table = &dpm_context->dpm_tables.soc_table;
319 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
320 ret = smu_v13_0_set_single_dpm_table(smu,
321 SMU_SOCCLK,
322 dpm_table);
323 if (ret)
324 return ret;
325 } else {
326 dpm_table->count = 1;
327 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
328 dpm_table->dpm_levels[0].enabled = true1;
329 dpm_table->min = dpm_table->dpm_levels[0].value;
330 dpm_table->max = dpm_table->dpm_levels[0].value;
331 }
332
333 /* gfxclk dpm table setup */
334 dpm_table = &dpm_context->dpm_tables.gfx_table;
335 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
336 /* in the case of gfxclk, only fine-grained dpm is honored */
337 dpm_table->count = 2;
338 dpm_table->dpm_levels[0].value = pptable->GfxclkFmin;
339 dpm_table->dpm_levels[0].enabled = true1;
340 dpm_table->dpm_levels[1].value = pptable->GfxclkFmax;
341 dpm_table->dpm_levels[1].enabled = true1;
342 dpm_table->min = dpm_table->dpm_levels[0].value;
343 dpm_table->max = dpm_table->dpm_levels[1].value;
344 } else {
345 dpm_table->count = 1;
346 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
347 dpm_table->dpm_levels[0].enabled = true1;
348 dpm_table->min = dpm_table->dpm_levels[0].value;
349 dpm_table->max = dpm_table->dpm_levels[0].value;
350 }
351
352 /* memclk dpm table setup */
353 dpm_table = &dpm_context->dpm_tables.uclk_table;
354 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
355 ret = smu_v13_0_set_single_dpm_table(smu,
356 SMU_UCLK,
357 dpm_table);
358 if (ret)
359 return ret;
360 } else {
361 dpm_table->count = 1;
362 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
363 dpm_table->dpm_levels[0].enabled = true1;
364 dpm_table->min = dpm_table->dpm_levels[0].value;
365 dpm_table->max = dpm_table->dpm_levels[0].value;
366 }
367
368 /* fclk dpm table setup */
369 dpm_table = &dpm_context->dpm_tables.fclk_table;
370 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
371 ret = smu_v13_0_set_single_dpm_table(smu,
372 SMU_FCLK,
373 dpm_table);
374 if (ret)
375 return ret;
376 } else {
377 dpm_table->count = 1;
378 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
379 dpm_table->dpm_levels[0].enabled = true1;
380 dpm_table->min = dpm_table->dpm_levels[0].value;
381 dpm_table->max = dpm_table->dpm_levels[0].value;
382 }
383
384 return 0;
385}
386
387static int aldebaran_check_powerplay_table(struct smu_context *smu)
388{
389 struct smu_table_context *table_context = &smu->smu_table;
390 struct smu_13_0_powerplay_table *powerplay_table =
391 table_context->power_play_table;
392
393 table_context->thermal_controller_type =
394 powerplay_table->thermal_controller_type;
395
396 return 0;
397}
398
399static int aldebaran_store_powerplay_table(struct smu_context *smu)
400{
401 struct smu_table_context *table_context = &smu->smu_table;
402 struct smu_13_0_powerplay_table *powerplay_table =
403 table_context->power_play_table;
404 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,__builtin_memcpy((table_context->driver_pptable), (&powerplay_table
->smc_pptable), (sizeof(PPTable_t)))
405 sizeof(PPTable_t))__builtin_memcpy((table_context->driver_pptable), (&powerplay_table
->smc_pptable), (sizeof(PPTable_t)))
;
406
407 return 0;
408}
409
410static int aldebaran_append_powerplay_table(struct smu_context *smu)
411{
412 struct smu_table_context *table_context = &smu->smu_table;
413 PPTable_t *smc_pptable = table_context->driver_pptable;
414 struct atom_smc_dpm_info_v4_10 *smc_dpm_table;
415 int index, ret;
416
417 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,(__builtin_offsetof(struct atom_master_list_of_data_tables_v2_1
, smc_dpm_info) / sizeof(uint16_t))
418 smc_dpm_info)(__builtin_offsetof(struct atom_master_list_of_data_tables_v2_1
, smc_dpm_info) / sizeof(uint16_t))
;
419
420 ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL((void *)0), NULL((void *)0), NULL((void *)0),
421 (uint8_t **)&smc_dpm_table);
422 if (ret)
423 return ret;
424
425 dev_info(smu->adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",do { } while(0)
426 smc_dpm_table->table_header.format_revision,do { } while(0)
427 smc_dpm_table->table_header.content_revision)do { } while(0);
428
429 if ((smc_dpm_table->table_header.format_revision == 4) &&
430 (smc_dpm_table->table_header.content_revision == 10))
431 smu_memcpy_trailing(smc_pptable, GfxMaxCurrent, reserved,({ size_t __src_offset = __builtin_offsetof(typeof(*(smc_dpm_table
)), GfxMaxCurrent); size_t __src_size = sizeof(*(smc_dpm_table
)) - __src_offset; size_t __dst_offset = __builtin_offsetof(typeof
(*(smc_pptable)), GfxMaxCurrent); size_t __dst_size = (__builtin_offsetof
(typeof(*(smc_pptable)), reserved) + sizeof((((typeof(*(smc_pptable
)) *)0)->reserved))) - __dst_offset; __builtin_memcpy((u8 *
)(smc_pptable) + __dst_offset, (u8 *)(smc_dpm_table) + __src_offset
, __dst_size); })
432 smc_dpm_table, GfxMaxCurrent)({ size_t __src_offset = __builtin_offsetof(typeof(*(smc_dpm_table
)), GfxMaxCurrent); size_t __src_size = sizeof(*(smc_dpm_table
)) - __src_offset; size_t __dst_offset = __builtin_offsetof(typeof
(*(smc_pptable)), GfxMaxCurrent); size_t __dst_size = (__builtin_offsetof
(typeof(*(smc_pptable)), reserved) + sizeof((((typeof(*(smc_pptable
)) *)0)->reserved))) - __dst_offset; __builtin_memcpy((u8 *
)(smc_pptable) + __dst_offset, (u8 *)(smc_dpm_table) + __src_offset
, __dst_size); })
;
433 return 0;
434}
435
436static int aldebaran_setup_pptable(struct smu_context *smu)
437{
438 int ret = 0;
439
440 /* VBIOS pptable is the first choice */
441 smu->smu_table.boot_values.pp_table_id = 0;
442
443 ret = smu_v13_0_setup_pptable(smu);
444 if (ret)
445 return ret;
446
447 ret = aldebaran_store_powerplay_table(smu);
448 if (ret)
449 return ret;
450
451 ret = aldebaran_append_powerplay_table(smu);
452 if (ret)
453 return ret;
454
455 ret = aldebaran_check_powerplay_table(smu);
456 if (ret)
457 return ret;
458
459 return ret;
460}
461
462static bool_Bool aldebaran_is_primary(struct smu_context *smu)
463{
464 struct amdgpu_device *adev = smu->adev;
465
466 if (adev->smuio.funcs && adev->smuio.funcs->get_die_id)
467 return adev->smuio.funcs->get_die_id(adev) == 0;
468
469 return true1;
470}
471
472static int aldebaran_run_board_btc(struct smu_context *smu)
473{
474 u32 smu_version;
475 int ret;
476
477 if (!aldebaran_is_primary(smu))
478 return 0;
479
480 ret = smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
481 if (ret) {
482 dev_err(smu->adev->dev, "Failed to get smu version!\n")printf("drm:pid%d:%s *ERROR* " "Failed to get smu version!\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
483 return ret;
484 }
485 if (smu_version <= 0x00441d00)
486 return 0;
487
488 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL((void *)0));
489 if (ret)
490 dev_err(smu->adev->dev, "Board power calibration failed!\n")printf("drm:pid%d:%s *ERROR* " "Board power calibration failed!\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
491
492 return ret;
493}
494
495static int aldebaran_run_btc(struct smu_context *smu)
496{
497 int ret;
498
499 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL((void *)0));
500 if (ret)
501 dev_err(smu->adev->dev, "RunDcBtc failed!\n")printf("drm:pid%d:%s *ERROR* " "RunDcBtc failed!\n", ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})->ci_curproc->p_p->ps_pid, __func__)
;
502 else
503 ret = aldebaran_run_board_btc(smu);
504
505 return ret;
506}
507
508static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
509{
510 struct smu_13_0_dpm_context *dpm_context =
511 smu->smu_dpm.dpm_context;
512 struct smu_13_0_dpm_table *gfx_table =
513 &dpm_context->dpm_tables.gfx_table;
514 struct smu_13_0_dpm_table *mem_table =
515 &dpm_context->dpm_tables.uclk_table;
516 struct smu_13_0_dpm_table *soc_table =
517 &dpm_context->dpm_tables.soc_table;
518 struct smu_umd_pstate_table *pstate_table =
519 &smu->pstate_table;
520
521 pstate_table->gfxclk_pstate.min = gfx_table->min;
522 pstate_table->gfxclk_pstate.peak = gfx_table->max;
523 pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
524 pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
525
526 pstate_table->uclk_pstate.min = mem_table->min;
527 pstate_table->uclk_pstate.peak = mem_table->max;
528 pstate_table->uclk_pstate.curr.min = mem_table->min;
529 pstate_table->uclk_pstate.curr.max = mem_table->max;
530
531 pstate_table->socclk_pstate.min = soc_table->min;
532 pstate_table->socclk_pstate.peak = soc_table->max;
533 pstate_table->socclk_pstate.curr.min = soc_table->min;
534 pstate_table->socclk_pstate.curr.max = soc_table->max;
535
536 if (gfx_table->count > ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL0x3 &&
537 mem_table->count > ALDEBARAN_UMD_PSTATE_MCLK_LEVEL0x2 &&
538 soc_table->count > ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL0x3) {
539 pstate_table->gfxclk_pstate.standard =
540 gfx_table->dpm_levels[ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL0x3].value;
541 pstate_table->uclk_pstate.standard =
542 mem_table->dpm_levels[ALDEBARAN_UMD_PSTATE_MCLK_LEVEL0x2].value;
543 pstate_table->socclk_pstate.standard =
544 soc_table->dpm_levels[ALDEBARAN_UMD_PSTATE_SOCCLK_LEVEL0x3].value;
545 } else {
546 pstate_table->gfxclk_pstate.standard =
547 pstate_table->gfxclk_pstate.min;
548 pstate_table->uclk_pstate.standard =
549 pstate_table->uclk_pstate.min;
550 pstate_table->socclk_pstate.standard =
551 pstate_table->socclk_pstate.min;
552 }
553
554 return 0;
555}
556
557static int aldebaran_get_clk_table(struct smu_context *smu,
558 struct pp_clock_levels_with_latency *clocks,
559 struct smu_13_0_dpm_table *dpm_table)
560{
561 uint32_t i;
562
563 clocks->num_levels = min_t(uint32_t,({ uint32_t __min_a = (dpm_table->count); uint32_t __min_b
= ((uint32_t)16); __min_a < __min_b ? __min_a : __min_b; }
)
564 dpm_table->count,({ uint32_t __min_a = (dpm_table->count); uint32_t __min_b
= ((uint32_t)16); __min_a < __min_b ? __min_a : __min_b; }
)
565 (uint32_t)PP_MAX_CLOCK_LEVELS)({ uint32_t __min_a = (dpm_table->count); uint32_t __min_b
= ((uint32_t)16); __min_a < __min_b ? __min_a : __min_b; }
)
;
566
567 for (i = 0; i < clocks->num_levels; i++) {
568 clocks->data[i].clocks_in_khz =
569 dpm_table->dpm_levels[i].value * 1000;
570 clocks->data[i].latency_in_us = 0;
571 }
572
573 return 0;
574}
575
576static int aldebaran_freqs_in_same_level(int32_t frequency1,
577 int32_t frequency2)
578{
579 return (abs(frequency1 - frequency2) <= EPSILON1);
580}
581
582static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
583 MetricsMember_t member,
584 uint32_t *value)
585{
586 struct smu_table_context *smu_table= &smu->smu_table;
587 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
588 int ret = 0;
589
590 ret = smu_cmn_get_metrics_table(smu,
591 NULL((void *)0),
592 false0);
593 if (ret)
594 return ret;
595
596 switch (member) {
597 case METRICS_CURR_GFXCLK:
598 *value = metrics->CurrClock[PPCLK_GFXCLK];
599 break;
600 case METRICS_CURR_SOCCLK:
601 *value = metrics->CurrClock[PPCLK_SOCCLK];
602 break;
603 case METRICS_CURR_UCLK:
604 *value = metrics->CurrClock[PPCLK_UCLK];
605 break;
606 case METRICS_CURR_VCLK:
607 *value = metrics->CurrClock[PPCLK_VCLK];
608 break;
609 case METRICS_CURR_DCLK:
610 *value = metrics->CurrClock[PPCLK_DCLK];
611 break;
612 case METRICS_CURR_FCLK:
613 *value = metrics->CurrClock[PPCLK_FCLK];
614 break;
615 case METRICS_AVERAGE_GFXCLK:
616 *value = metrics->AverageGfxclkFrequency;
617 break;
618 case METRICS_AVERAGE_SOCCLK:
619 *value = metrics->AverageSocclkFrequency;
620 break;
621 case METRICS_AVERAGE_UCLK:
622 *value = metrics->AverageUclkFrequency;
623 break;
624 case METRICS_AVERAGE_GFXACTIVITY:
625 *value = metrics->AverageGfxActivity;
626 break;
627 case METRICS_AVERAGE_MEMACTIVITY:
628 *value = metrics->AverageUclkActivity;
629 break;
630 case METRICS_AVERAGE_SOCKETPOWER:
631 /* Valid power data is available only from primary die */
632 *value = aldebaran_is_primary(smu) ?
633 metrics->AverageSocketPower << 8 :
634 0;
635 break;
636 case METRICS_TEMPERATURE_EDGE:
637 *value = metrics->TemperatureEdge *
638 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
639 break;
640 case METRICS_TEMPERATURE_HOTSPOT:
641 *value = metrics->TemperatureHotspot *
642 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
643 break;
644 case METRICS_TEMPERATURE_MEM:
645 *value = metrics->TemperatureHBM *
646 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
647 break;
648 case METRICS_TEMPERATURE_VRGFX:
649 *value = metrics->TemperatureVrGfx *
650 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
651 break;
652 case METRICS_TEMPERATURE_VRSOC:
653 *value = metrics->TemperatureVrSoc *
654 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
655 break;
656 case METRICS_TEMPERATURE_VRMEM:
657 *value = metrics->TemperatureVrMem *
658 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
659 break;
660 case METRICS_THROTTLER_STATUS:
661 *value = metrics->ThrottlerStatus;
662 break;
663 case METRICS_UNIQUE_ID_UPPER32:
664 *value = metrics->PublicSerialNumUpper32;
665 break;
666 case METRICS_UNIQUE_ID_LOWER32:
667 *value = metrics->PublicSerialNumLower32;
668 break;
669 default:
670 *value = UINT_MAX0xffffffffU;
671 break;
672 }
673
674 return ret;
675}
676
677static int aldebaran_get_current_clk_freq_by_table(struct smu_context *smu,
678 enum smu_clk_type clk_type,
679 uint32_t *value)
680{
681 MetricsMember_t member_type;
682 int clk_id = 0;
683
684 if (!value)
685 return -EINVAL22;
686
687 clk_id = smu_cmn_to_asic_specific_index(smu,
688 CMN2ASIC_MAPPING_CLK,
689 clk_type);
690 if (clk_id < 0)
691 return -EINVAL22;
692
693 switch (clk_id) {
694 case PPCLK_GFXCLK:
695 /*
696 * CurrClock[clk_id] can provide accurate
697 * output only when the dpm feature is enabled.
698 * We can use Average_* for dpm disabled case.
699 * But this is available for gfxclk/uclk/socclk/vclk/dclk.
700 */
701 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
702 member_type = METRICS_CURR_GFXCLK;
703 else
704 member_type = METRICS_AVERAGE_GFXCLK;
705 break;
706 case PPCLK_UCLK:
707 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
708 member_type = METRICS_CURR_UCLK;
709 else
710 member_type = METRICS_AVERAGE_UCLK;
711 break;
712 case PPCLK_SOCCLK:
713 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
714 member_type = METRICS_CURR_SOCCLK;
715 else
716 member_type = METRICS_AVERAGE_SOCCLK;
717 break;
718 case PPCLK_VCLK:
719 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
720 member_type = METRICS_CURR_VCLK;
721 else
722 member_type = METRICS_AVERAGE_VCLK;
723 break;
724 case PPCLK_DCLK:
725 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
726 member_type = METRICS_CURR_DCLK;
727 else
728 member_type = METRICS_AVERAGE_DCLK;
729 break;
730 case PPCLK_FCLK:
731 member_type = METRICS_CURR_FCLK;
732 break;
733 default:
734 return -EINVAL22;
735 }
736
737 return aldebaran_get_smu_metrics_data(smu,
738 member_type,
739 value);
740}
741
742static int aldebaran_print_clk_levels(struct smu_context *smu,
743 enum smu_clk_type type, char *buf)
744{
745 int i, now, size = 0;
746 int ret = 0;
747 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
748 struct pp_clock_levels_with_latency clocks;
749 struct smu_13_0_dpm_table *single_dpm_table;
750 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
751 struct smu_13_0_dpm_context *dpm_context = NULL((void *)0);
752 int display_levels;
753 uint32_t freq_values[3] = {0};
754 uint32_t min_clk, max_clk;
755
756 smu_cmn_get_sysfs_buf(&buf, &size);
757
758 if (amdgpu_ras_intr_triggered()) {
759 size += sysfs_emit_at(buf, size, "unavailable\n");
760 return size;
761 }
762
763 dpm_context = smu_dpm->dpm_context;
764
765 switch (type) {
766
767 case SMU_OD_SCLK:
768 size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
769 fallthroughdo {} while (0);
770 case SMU_SCLK:
771 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
772 if (ret) {
773 dev_err(smu->adev->dev, "Attempt to get current gfx clk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current gfx clk Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
774 return ret;
775 }
776
777 single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
778 ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
779 if (ret) {
780 dev_err(smu->adev->dev, "Attempt to get gfx clk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get gfx clk levels Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
781 return ret;
782 }
783
784 display_levels = (clocks.num_levels == 1) ? 1 : 2;
785
786 min_clk = pstate_table->gfxclk_pstate.curr.min;
787 max_clk = pstate_table->gfxclk_pstate.curr.max;
788
789 freq_values[0] = min_clk;
790 freq_values[1] = max_clk;
791
792 /* fine-grained dpm has only 2 levels */
793 if (now > min_clk && now < max_clk) {
794 display_levels++;
795 freq_values[2] = max_clk;
796 freq_values[1] = now;
797 }
798
799 for (i = 0; i < display_levels; i++)
800 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
801 freq_values[i],
802 (display_levels == 1) ?
803 "*" :
804 (aldebaran_freqs_in_same_level(
805 freq_values[i], now) ?
806 "*" :
807 ""));
808
809 break;
810
811 case SMU_OD_MCLK:
812 size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
813 fallthroughdo {} while (0);
814 case SMU_MCLK:
815 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);
816 if (ret) {
817 dev_err(smu->adev->dev, "Attempt to get current mclk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current mclk Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
818 return ret;
819 }
820
821 single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
822 ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
823 if (ret) {
824 dev_err(smu->adev->dev, "Attempt to get memory clk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get memory clk levels Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
825 return ret;
826 }
827
828 for (i = 0; i < clocks.num_levels; i++)
829 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
830 i, clocks.data[i].clocks_in_khz / 1000,
831 (clocks.num_levels == 1) ? "*" :
832 (aldebaran_freqs_in_same_level(
833 clocks.data[i].clocks_in_khz / 1000,
834 now) ? "*" : ""));
835 break;
836
837 case SMU_SOCCLK:
838 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &now);
839 if (ret) {
840 dev_err(smu->adev->dev, "Attempt to get current socclk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current socclk Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
841 return ret;
842 }
843
844 single_dpm_table = &(dpm_context->dpm_tables.soc_table);
845 ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
846 if (ret) {
847 dev_err(smu->adev->dev, "Attempt to get socclk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get socclk levels Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
848 return ret;
849 }
850
851 for (i = 0; i < clocks.num_levels; i++)
852 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
853 i, clocks.data[i].clocks_in_khz / 1000,
854 (clocks.num_levels == 1) ? "*" :
855 (aldebaran_freqs_in_same_level(
856 clocks.data[i].clocks_in_khz / 1000,
857 now) ? "*" : ""));
858 break;
859
860 case SMU_FCLK:
861 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &now);
862 if (ret) {
863 dev_err(smu->adev->dev, "Attempt to get current fclk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current fclk Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
864 return ret;
865 }
866
867 single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
868 ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
869 if (ret) {
870 dev_err(smu->adev->dev, "Attempt to get fclk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get fclk levels Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
871 return ret;
872 }
873
874 for (i = 0; i < single_dpm_table->count; i++)
875 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
876 i, single_dpm_table->dpm_levels[i].value,
877 (clocks.num_levels == 1) ? "*" :
878 (aldebaran_freqs_in_same_level(
879 clocks.data[i].clocks_in_khz / 1000,
880 now) ? "*" : ""));
881 break;
882
883 case SMU_VCLK:
884 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
885 if (ret) {
886 dev_err(smu->adev->dev, "Attempt to get current vclk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current vclk Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
887 return ret;
888 }
889
890 single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
891 ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
892 if (ret) {
893 dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get vclk levels Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
894 return ret;
895 }
896
897 for (i = 0; i < single_dpm_table->count; i++)
898 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
899 i, single_dpm_table->dpm_levels[i].value,
900 (clocks.num_levels == 1) ? "*" :
901 (aldebaran_freqs_in_same_level(
902 clocks.data[i].clocks_in_khz / 1000,
903 now) ? "*" : ""));
904 break;
905
906 case SMU_DCLK:
907 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
908 if (ret) {
909 dev_err(smu->adev->dev, "Attempt to get current dclk Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get current dclk Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
910 return ret;
911 }
912
913 single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
914 ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
915 if (ret) {
916 dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!")printf("drm:pid%d:%s *ERROR* " "Attempt to get dclk levels Failed!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
917 return ret;
918 }
919
920 for (i = 0; i < single_dpm_table->count; i++)
921 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
922 i, single_dpm_table->dpm_levels[i].value,
923 (clocks.num_levels == 1) ? "*" :
924 (aldebaran_freqs_in_same_level(
925 clocks.data[i].clocks_in_khz / 1000,
926 now) ? "*" : ""));
927 break;
928
929 default:
930 break;
931 }
932
933 return size;
934}
935
936static int aldebaran_upload_dpm_level(struct smu_context *smu,
937 bool_Bool max,
938 uint32_t feature_mask,
939 uint32_t level)
940{
941 struct smu_13_0_dpm_context *dpm_context =
942 smu->smu_dpm.dpm_context;
943 uint32_t freq;
944 int ret = 0;
945
946 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
947 (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)(1ULL << 1))) {
948 freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
949 ret = smu_cmn_send_smc_msg_with_param(smu,
950 (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
951 (PPCLK_GFXCLK << 16) | (freq & 0xffff),
952 NULL((void *)0));
953 if (ret) {
954 dev_err(smu->adev->dev, "Failed to set soft %s gfxclk !\n",printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s gfxclk !\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max"
: "min")
955 max ? "max" : "min")printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s gfxclk !\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max"
: "min")
;
956 return ret;
957 }
958 }
959
960 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
961 (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK_BIT)(1ULL << 2))) {
962 freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level].value;
963 ret = smu_cmn_send_smc_msg_with_param(smu,
964 (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
965 (PPCLK_UCLK << 16) | (freq & 0xffff),
966 NULL((void *)0));
967 if (ret) {
968 dev_err(smu->adev->dev, "Failed to set soft %s memclk !\n",printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s memclk !\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max"
: "min")
969 max ? "max" : "min")printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s memclk !\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max"
: "min")
;
970 return ret;
971 }
972 }
973
974 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
975 (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)(1ULL << 3))) {
976 freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
977 ret = smu_cmn_send_smc_msg_with_param(smu,
978 (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
979 (PPCLK_SOCCLK << 16) | (freq & 0xffff),
980 NULL((void *)0));
981 if (ret) {
982 dev_err(smu->adev->dev, "Failed to set soft %s socclk !\n",printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s socclk !\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max"
: "min")
983 max ? "max" : "min")printf("drm:pid%d:%s *ERROR* " "Failed to set soft %s socclk !\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , max ? "max"
: "min")
;
984 return ret;
985 }
986 }
987
988 return ret;
989}
990
991static int aldebaran_force_clk_levels(struct smu_context *smu,
992 enum smu_clk_type type, uint32_t mask)
993{
994 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
995 struct smu_13_0_dpm_table *single_dpm_table = NULL((void *)0);
996 uint32_t soft_min_level, soft_max_level;
997 int ret = 0;
998
999 soft_min_level = mask ? (ffs(mask) - 1) : 0;
1000 soft_max_level = mask ? (fls(mask) - 1) : 0;
1001
1002 switch (type) {
1003 case SMU_SCLK:
1004 single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
1005 if (soft_max_level >= single_dpm_table->count) {
1006 dev_err(smu->adev->dev, "Clock level specified %d is over max allowed %d\n",printf("drm:pid%d:%s *ERROR* " "Clock level specified %d is over max allowed %d\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , soft_max_level
, single_dpm_table->count - 1)
1007 soft_max_level, single_dpm_table->count - 1)printf("drm:pid%d:%s *ERROR* " "Clock level specified %d is over max allowed %d\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , soft_max_level
, single_dpm_table->count - 1)
;
1008 ret = -EINVAL22;
1009 break;
1010 }
1011
1012 ret = aldebaran_upload_dpm_level(smu,
1013 false0,
1014 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)(1ULL << 1),
1015 soft_min_level);
1016 if (ret) {
1017 dev_err(smu->adev->dev, "Failed to upload boot level to lowest!\n")printf("drm:pid%d:%s *ERROR* " "Failed to upload boot level to lowest!\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1018 break;
1019 }
1020
1021 ret = aldebaran_upload_dpm_level(smu,
1022 true1,
1023 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)(1ULL << 1),
1024 soft_max_level);
1025 if (ret)
1026 dev_err(smu->adev->dev, "Failed to upload dpm max level to highest!\n")printf("drm:pid%d:%s *ERROR* " "Failed to upload dpm max level to highest!\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1027
1028 break;
1029
1030 case SMU_MCLK:
1031 case SMU_SOCCLK:
1032 case SMU_FCLK:
1033 /*
1034 * Should not arrive here since aldebaran does not
1035 * support mclk/socclk/fclk softmin/softmax settings
1036 */
1037 ret = -EINVAL22;
1038 break;
1039
1040 default:
1041 break;
1042 }
1043
1044 return ret;
1045}
1046
1047static int aldebaran_get_thermal_temperature_range(struct smu_context *smu,
1048 struct smu_temperature_range *range)
1049{
1050 struct smu_table_context *table_context = &smu->smu_table;
1051 struct smu_13_0_powerplay_table *powerplay_table =
1052 table_context->power_play_table;
1053 PPTable_t *pptable = smu->smu_table.driver_pptable;
1054
1055 if (!range)
1056 return -EINVAL22;
1057
1058 memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range))__builtin_memcpy((range), (&smu13_thermal_policy[0]), (sizeof
(struct smu_temperature_range)))
;
1059
1060 range->hotspot_crit_max = pptable->ThotspotLimit *
1061 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
1062 range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT5) *
1063 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
1064 range->mem_crit_max = pptable->TmemLimit *
1065 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
1066 range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM5)*
1067 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES1000;
1068 range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
1069
1070 return 0;
1071}
1072
1073static int aldebaran_get_current_activity_percent(struct smu_context *smu,
1074 enum amd_pp_sensors sensor,
1075 uint32_t *value)
1076{
1077 int ret = 0;
1078
1079 if (!value)
1080 return -EINVAL22;
1081
1082 switch (sensor) {
1083 case AMDGPU_PP_SENSOR_GPU_LOAD:
1084 ret = aldebaran_get_smu_metrics_data(smu,
1085 METRICS_AVERAGE_GFXACTIVITY,
1086 value);
1087 break;
1088 case AMDGPU_PP_SENSOR_MEM_LOAD:
1089 ret = aldebaran_get_smu_metrics_data(smu,
1090 METRICS_AVERAGE_MEMACTIVITY,
1091 value);
1092 break;
1093 default:
1094 dev_err(smu->adev->dev, "Invalid sensor for retrieving clock activity\n")printf("drm:pid%d:%s *ERROR* " "Invalid sensor for retrieving clock activity\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1095 return -EINVAL22;
1096 }
1097
1098 return ret;
1099}
1100
1101static int aldebaran_get_gpu_power(struct smu_context *smu, uint32_t *value)
1102{
1103 if (!value)
1104 return -EINVAL22;
1105
1106 return aldebaran_get_smu_metrics_data(smu,
1107 METRICS_AVERAGE_SOCKETPOWER,
1108 value);
1109}
1110
1111static int aldebaran_thermal_get_temperature(struct smu_context *smu,
1112 enum amd_pp_sensors sensor,
1113 uint32_t *value)
1114{
1115 int ret = 0;
1116
1117 if (!value)
1118 return -EINVAL22;
1119
1120 switch (sensor) {
1121 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1122 ret = aldebaran_get_smu_metrics_data(smu,
1123 METRICS_TEMPERATURE_HOTSPOT,
1124 value);
1125 break;
1126 case AMDGPU_PP_SENSOR_EDGE_TEMP:
1127 ret = aldebaran_get_smu_metrics_data(smu,
1128 METRICS_TEMPERATURE_EDGE,
1129 value);
1130 break;
1131 case AMDGPU_PP_SENSOR_MEM_TEMP:
1132 ret = aldebaran_get_smu_metrics_data(smu,
1133 METRICS_TEMPERATURE_MEM,
1134 value);
1135 break;
1136 default:
1137 dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n")printf("drm:pid%d:%s *ERROR* " "Invalid sensor for retrieving temp\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1138 return -EINVAL22;
1139 }
1140
1141 return ret;
1142}
1143
1144static int aldebaran_read_sensor(struct smu_context *smu,
1145 enum amd_pp_sensors sensor,
1146 void *data, uint32_t *size)
1147{
1148 int ret = 0;
1149
1150 if (amdgpu_ras_intr_triggered())
1151 return 0;
1152
1153 if (!data || !size)
1154 return -EINVAL22;
1155
1156 switch (sensor) {
1157 case AMDGPU_PP_SENSOR_MEM_LOAD:
1158 case AMDGPU_PP_SENSOR_GPU_LOAD:
1159 ret = aldebaran_get_current_activity_percent(smu,
1160 sensor,
1161 (uint32_t *)data);
1162 *size = 4;
1163 break;
1164 case AMDGPU_PP_SENSOR_GPU_POWER:
1165 ret = aldebaran_get_gpu_power(smu, (uint32_t *)data);
1166 *size = 4;
1167 break;
1168 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1169 case AMDGPU_PP_SENSOR_EDGE_TEMP:
1170 case AMDGPU_PP_SENSOR_MEM_TEMP:
1171 ret = aldebaran_thermal_get_temperature(smu, sensor,
1172 (uint32_t *)data);
1173 *size = 4;
1174 break;
1175 case AMDGPU_PP_SENSOR_GFX_MCLK:
1176 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
1177 /* the output clock frequency in 10K unit */
1178 *(uint32_t *)data *= 100;
1179 *size = 4;
1180 break;
1181 case AMDGPU_PP_SENSOR_GFX_SCLK:
1182 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
1183 *(uint32_t *)data *= 100;
1184 *size = 4;
1185 break;
1186 case AMDGPU_PP_SENSOR_VDDGFX:
1187 ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
1188 *size = 4;
1189 break;
1190 default:
1191 ret = -EOPNOTSUPP45;
1192 break;
1193 }
1194
1195 return ret;
1196}
1197
1198static int aldebaran_get_power_limit(struct smu_context *smu,
1199 uint32_t *current_power_limit,
1200 uint32_t *default_power_limit,
1201 uint32_t *max_power_limit)
1202{
1203 PPTable_t *pptable = smu->smu_table.driver_pptable;
1204 uint32_t power_limit = 0;
1205 int ret;
1206
1207 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
1208 if (current_power_limit)
1209 *current_power_limit = 0;
1210 if (default_power_limit)
1211 *default_power_limit = 0;
1212 if (max_power_limit)
1213 *max_power_limit = 0;
1214
1215 dev_warn(smu->adev->dev,printf("drm:pid%d:%s *WARNING* " "PPT feature is not enabled, power values can't be fetched."
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
1216 "PPT feature is not enabled, power values can't be fetched.")printf("drm:pid%d:%s *WARNING* " "PPT feature is not enabled, power values can't be fetched."
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1217
1218 return 0;
1219 }
1220
1221 /* Valid power data is available only from primary die.
1222 * For secondary die show the value as 0.
1223 */
1224 if (aldebaran_is_primary(smu)) {
1225 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit,
1226 &power_limit);
1227
1228 if (ret) {
1229 /* the last hope to figure out the ppt limit */
1230 if (!pptable) {
1231 dev_err(smu->adev->dev,printf("drm:pid%d:%s *ERROR* " "Cannot get PPT limit due to pptable missing!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
1232 "Cannot get PPT limit due to pptable missing!")printf("drm:pid%d:%s *ERROR* " "Cannot get PPT limit due to pptable missing!"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1233 return -EINVAL22;
1234 }
1235 power_limit = pptable->PptLimit;
1236 }
1237 }
1238
1239 if (current_power_limit)
1240 *current_power_limit = power_limit;
1241 if (default_power_limit)
1242 *default_power_limit = power_limit;
1243
1244 if (max_power_limit) {
1245 if (pptable)
1246 *max_power_limit = pptable->PptLimit;
1247 }
1248
1249 return 0;
1250}
1251
1252static int aldebaran_set_power_limit(struct smu_context *smu,
1253 enum smu_ppt_limit_type limit_type,
1254 uint32_t limit)
1255{
1256 /* Power limit can be set only through primary die */
1257 if (aldebaran_is_primary(smu))
1258 return smu_v13_0_set_power_limit(smu, limit_type, limit);
1259
1260 return -EINVAL22;
1261}
1262
1263static int aldebaran_system_features_control(struct smu_context *smu, bool_Bool enable)
1264{
1265 int ret;
1266
1267 ret = smu_v13_0_system_features_control(smu, enable);
1268 if (!ret && enable)
1269 ret = aldebaran_run_btc(smu);
1270
1271 return ret;
1272}
1273
1274static int aldebaran_set_performance_level(struct smu_context *smu,
1275 enum amd_dpm_forced_level level)
1276{
1277 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1278 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1279 struct smu_13_0_dpm_table *gfx_table =
1280 &dpm_context->dpm_tables.gfx_table;
1281 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1282
1283 /* Disable determinism if switching to another mode */
1284 if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
1285 (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
1286 smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL((void *)0));
1287 pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
1288 }
1289
1290 switch (level) {
1291
1292 case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
1293 return 0;
1294
1295 case AMD_DPM_FORCED_LEVEL_HIGH:
1296 case AMD_DPM_FORCED_LEVEL_LOW:
1297 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1298 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1299 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1300 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1301 default:
1302 break;
1303 }
1304
1305 return smu_v13_0_set_performance_level(smu, level);
1306}
1307
/*
 * aldebaran_set_soft_freq_limited_range() - apply a soft [min, max] limit
 * to the GFX clock.  Only SMU_GFXCLK/SMU_SCLK are accepted, and only while
 * the dpm level is MANUAL or PERF_DETERMINISM.  Manual mode pushes the
 * caller's range to the SMU and caches it in pstate_table; determinism mode
 * validates "max" against the default gfx table, restores the default
 * range, then sends SMU_MSG_EnableDeterminism with "max".
 * NOTE(review): this is a scan-build annotated dump (original line numbers
 * and macro expansions are fused into the text); code left byte-identical.
 */
1308static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
1309 enum smu_clk_type clk_type,
1310 uint32_t min,
1311 uint32_t max)
1312{
1313 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1314 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1315 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1316 struct amdgpu_device *adev = smu->adev;
1317 uint32_t min_clk;
1318 uint32_t max_clk;
1319 int ret = 0;
1320
 /* only the GFX/SCLK domain supports soft limits here */
1321 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
1322 return -EINVAL22;
1323
1324 if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1325 && (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
1326 return -EINVAL22;
1327
1328 if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
1329 if (min >= max) {
1330 dev_err(smu->adev->dev,printf("drm:pid%d:%s *ERROR* " "Minimum GFX clk should be less than the maximum allowed clock\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
1331 "Minimum GFX clk should be less than the maximum allowed clock\n")printf("drm:pid%d:%s *ERROR* " "Minimum GFX clk should be less than the maximum allowed clock\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1332 return -EINVAL22;
1333 }
1334
 /* nothing to do if the requested range is already current */
1335 if ((min == pstate_table->gfxclk_pstate.curr.min) &&
1336 (max == pstate_table->gfxclk_pstate.curr.max))
1337 return 0;
1338
1339 ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
1340 min, max);
1341 if (!ret) {
1342 pstate_table->gfxclk_pstate.curr.min = min;
1343 pstate_table->gfxclk_pstate.curr.max = max;
1344 }
1345
1346 return ret;
1347 }
1348
1349 if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
 /* determinism only honours "max"; it must lie inside the default table */
1350 if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
1351 (max > dpm_context->dpm_tables.gfx_table.max)) {
1352 dev_warn(adev->dev,printf("drm:pid%d:%s *WARNING* " "Invalid max frequency %d MHz specified for determinism\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , max)
1353 "Invalid max frequency %d MHz specified for determinism\n", max)printf("drm:pid%d:%s *WARNING* " "Invalid max frequency %d MHz specified for determinism\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , max)
;
1354 return -EINVAL22;
1355 }
1356
1357 /* Restore default min/max clocks and enable determinism */
1358 min_clk = dpm_context->dpm_tables.gfx_table.min;
1359 max_clk = dpm_context->dpm_tables.gfx_table.max;
1360 ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
1361 if (!ret) {
 /* short settle delay before asking FW to enter determinism */
1362 usleep_range(500, 1000);
1363 ret = smu_cmn_send_smc_msg_with_param(smu,
1364 SMU_MSG_EnableDeterminism,
1365 max, NULL((void *)0));
1366 if (ret) {
1367 dev_err(adev->dev,printf("drm:pid%d:%s *ERROR* " "Failed to enable determinism at GFX clock %d MHz\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , max)
1368 "Failed to enable determinism at GFX clock %d MHz\n", max)printf("drm:pid%d:%s *ERROR* " "Failed to enable determinism at GFX clock %d MHz\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , max)
;
1369 } else {
1370 pstate_table->gfxclk_pstate.curr.min = min_clk;
1371 pstate_table->gfxclk_pstate.curr.max = max;
1372 }
1373 }
1374 }
1375
1376 return ret;
1377}
1378
/*
 * aldebaran_usr_edit_dpm_table() - handle the pp_od_clk_voltage sysfs
 * commands: edit the custom GFX min/max (PP_OD_EDIT_SCLK_VDDC_TABLE with
 * input[0] selecting min=0/max=1), restore the defaults
 * (PP_OD_RESTORE_DEFAULT_TABLE), or commit the custom range
 * (PP_OD_COMMIT_DPM_TABLE).  Only valid in MANUAL or PERF_DETERMINISM mode.
 * Commits go through aldebaran_set_soft_freq_limited_range().
 */
1380static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
1381 long input[], uint32_t size)
1382{
1383 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1384 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1385 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1386 uint32_t min_clk;
1387 uint32_t max_clk;
1388 int ret = 0;
1389
1390 /* Only allowed in manual or determinism mode */
1391 if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1392 && (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
1393 return -EINVAL22;
1394
1395 switch (type) {
1396 case PP_OD_EDIT_SCLK_VDDC_TABLE:
 /* expects exactly two longs: selector (0=min, 1=max) and the clock */
1397 if (size != 2) {
1398 dev_err(smu->adev->dev, "Input parameter number not correct\n")printf("drm:pid%d:%s *ERROR* " "Input parameter number not correct\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1399 return -EINVAL22;
1400 }
1401
1402 if (input[0] == 0) {
1403 if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
1404 dev_warn(smu->adev->dev, "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",printf("drm:pid%d:%s *WARNING* " "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , input[
1], dpm_context->dpm_tables.gfx_table.min)
1405 input[1], dpm_context->dpm_tables.gfx_table.min)printf("drm:pid%d:%s *WARNING* " "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , input[
1], dpm_context->dpm_tables.gfx_table.min)
;
 /* fall back to the current min so a later commit stays sane */
1406 pstate_table->gfxclk_pstate.custom.min =
1407 pstate_table->gfxclk_pstate.curr.min;
1408 return -EINVAL22;
1409 }
1410
1411 pstate_table->gfxclk_pstate.custom.min = input[1];
1412 } else if (input[0] == 1) {
1413 if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
1414 dev_warn(smu->adev->dev, "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",printf("drm:pid%d:%s *WARNING* " "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , input[
1], dpm_context->dpm_tables.gfx_table.max)
1415 input[1], dpm_context->dpm_tables.gfx_table.max)printf("drm:pid%d:%s *WARNING* " "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , input[
1], dpm_context->dpm_tables.gfx_table.max)
;
1416 pstate_table->gfxclk_pstate.custom.max =
1417 pstate_table->gfxclk_pstate.curr.max;
1418 return -EINVAL22;
1419 }
1420
1421 pstate_table->gfxclk_pstate.custom.max = input[1];
1422 } else {
1423 return -EINVAL22;
1424 }
1425 break;
1426 case PP_OD_RESTORE_DEFAULT_TABLE:
1427 if (size != 0) {
1428 dev_err(smu->adev->dev, "Input parameter number not correct\n")printf("drm:pid%d:%s *ERROR* " "Input parameter number not correct\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1429 return -EINVAL22;
1430 } else {
1431 /* Use the default frequencies for manual and determinism mode */
1432 min_clk = dpm_context->dpm_tables.gfx_table.min;
1433 max_clk = dpm_context->dpm_tables.gfx_table.max;
1434
1435 return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
1436 }
1437 break;
1438 case PP_OD_COMMIT_DPM_TABLE:
1439 if (size != 0) {
1440 dev_err(smu->adev->dev, "Input parameter number not correct\n")printf("drm:pid%d:%s *ERROR* " "Input parameter number not correct\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
1441 return -EINVAL22;
1442 } else {
 /* unset custom bounds default to the current range */
1443 if (!pstate_table->gfxclk_pstate.custom.min)
1444 pstate_table->gfxclk_pstate.custom.min =
1445 pstate_table->gfxclk_pstate.curr.min;
1446
1447 if (!pstate_table->gfxclk_pstate.custom.max)
1448 pstate_table->gfxclk_pstate.custom.max =
1449 pstate_table->gfxclk_pstate.curr.max;
1450
1451 min_clk = pstate_table->gfxclk_pstate.custom.min;
1452 max_clk = pstate_table->gfxclk_pstate.custom.max;
1453
1454 return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
1455 }
1456 break;
1457 default:
1458 return -ENOSYS78;
1459 }
1460
1461 return ret;
1462}
1462
/*
 * aldebaran_is_dpm_running() - report whether any core DPM feature bit is
 * set in the SMU's enabled-feature mask.  Returns false if the mask cannot
 * be read.
 */
1463static bool_Bool aldebaran_is_dpm_running(struct smu_context *smu)
1464{
1465 int ret;
1466 uint64_t feature_enabled;
1467
1468 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
1469 if (ret)
1470 return false0;
1471 return !!(feature_enabled & SMC_DPM_FEATURE( (1ULL << 0) | (1ULL << 1) | (1ULL << 2) |
(1ULL << 3) | (1ULL << 4) | (1ULL << 5) | (
1ULL << 6) | (1ULL << 13))
);
1472}
1473
/*
 * aldebaran_i2c_xfer() - i2c_algorithm master_xfer hook.  Packs the i2c
 * messages into a SwI2cRequest_t (inserting RESTART on direction change
 * and STOP on the final byte / explicit I2C_M_STOP), asks the SMU to run
 * the transfer via SMU_TABLE_I2C_COMMANDS under pm.mutex, then copies read
 * data back from the driver table.  Returns num_msgs on success or a
 * negative errno.  The adapter quirks bound total length to
 * MAX_SW_I2C_COMMANDS, so index 'c' stays within SwI2cCmds[].
 */
1474static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
1475 struct i2c_msg *msg, int num_msgs)
1476{
1477 struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
1478 struct amdgpu_device *adev = smu_i2c->adev;
1479 struct smu_context *smu = adev->powerplay.pp_handle;
1480 struct smu_table_context *smu_table = &smu->smu_table;
1481 struct smu_table *table = &smu_table->driver_table;
1482 SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
1483 int i, j, r, c;
1484 u16 dir;
1485
1486 if (!adev->pm.dpm_enabled)
1487 return -EBUSY16;
1488
1489 req = kzalloc(sizeof(*req), GFP_KERNEL(0x0001 | 0x0004));
1490 if (!req)
1491 return -ENOMEM12;
1492
1493 req->I2CcontrollerPort = smu_i2c->port;
1494 req->I2CSpeed = I2C_SPEED_FAST_400K;
1495 req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
1496 dir = msg[0].flags & I2C_M_RD0x0001;
1497
1498 for (c = i = 0; i < num_msgs; i++) {
1499 for (j = 0; j < msg[i].len; j++, c++) {
1500 SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
1501
1502 if (!(msg[i].flags & I2C_M_RD0x0001)) {
1503 /* write */
1504 cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK(1 << 2);
1505 cmd->ReadWriteData = msg[i].buf[j];
1506 }
1507
1508 if ((dir ^ msg[i].flags) & I2C_M_RD0x0001) {
1509 /* The direction changes.
1510 */
1511 dir = msg[i].flags & I2C_M_RD0x0001;
1512 cmd->CmdConfig |= CMDCONFIG_RESTART_MASK(1 << 1);
1513 }
1514
1515 req->NumCmds++;
1516
1517 /*
1518 * Insert STOP if we are at the last byte of either last
1519 * message for the transaction or the client explicitly
1520 * requires a STOP at this particular message.
1521 */
1522 if ((j == msg[i].len - 1) &&
1523 ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP0x0004))) {
1524 cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK(1 << 1);
1525 cmd->CmdConfig |= CMDCONFIG_STOP_MASK(1 << 0);
1526 }
1527 }
1528 }
1529 mutex_lock(&adev->pm.mutex)rw_enter_write(&adev->pm.mutex);
1530 r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true1);
1531 mutex_unlock(&adev->pm.mutex)rw_exit_write(&adev->pm.mutex);
1532 if (r)
1533 goto fail;
1534
 /* copy read payloads back out of the response table */
1535 for (c = i = 0; i < num_msgs; i++) {
1536 if (!(msg[i].flags & I2C_M_RD0x0001)) {
1537 c += msg[i].len;
1538 continue;
1539 }
1540 for (j = 0; j < msg[i].len; j++, c++) {
1541 SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
1542
1543 msg[i].buf[j] = cmd->ReadWriteData;
1544 }
1545 }
1546 r = num_msgs;
1547fail:
1548 kfree(req);
1549 return r;
1550}
1551
/* i2c_algorithm functionality hook: plain I2C plus SMBus emulation. */
1552static u32 aldebaran_i2c_func(struct i2c_adapter *adap)
1553{
1554 return I2C_FUNC_I2C0 | I2C_FUNC_SMBUS_EMUL0;
1555}
1556
1557
/* i2c_algorithm vtable wired into the SMU-backed adapter below. */
1558static const struct i2c_algorithm aldebaran_i2c_algo = {
1559 .master_xfer = aldebaran_i2c_xfer,
1560 .functionality = aldebaran_i2c_func,
1561};
1562
/*
 * Adapter quirks: transfer sizes are capped at MAX_SW_I2C_COMMANDS so a
 * whole transaction fits in one SwI2cRequest_t (see aldebaran_i2c_xfer()).
 */
1563static const struct i2c_adapter_quirks aldebaran_i2c_control_quirks = {
1564 .flags = I2C_AQ_COMB0 | I2C_AQ_COMB_SAME_ADDR0 | I2C_AQ_NO_ZERO_LEN0,
1565 .max_read_len = MAX_SW_I2C_COMMANDS24,
1566 .max_write_len = MAX_SW_I2C_COMMANDS24,
1567 .max_comb_1st_msg_len = 2,
1568 .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS24 - 2,
1569};
1570
/*
 * aldebaran_i2c_control_init() - register SMU i2c bus 0 as a Linux-style
 * i2c adapter and point the RAS/FRU EEPROM bus handles at it.  Returns 0
 * or the i2c_add_adapter() error.
 */
1571static int aldebaran_i2c_control_init(struct smu_context *smu)
1572{
1573 struct amdgpu_device *adev = smu->adev;
1574 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[0];
1575 struct i2c_adapter *control = &smu_i2c->adapter;
1576 int res;
1577
1578 smu_i2c->adev = adev;
1579 smu_i2c->port = 0;
1580 rw_init(&smu_i2c->mutex, "aldiic")_rw_init_flags(&smu_i2c->mutex, "aldiic", 0, ((void *)
0))
;
1581#ifdef __linux__
1582 control->owner = THIS_MODULE((void *)0);
1583 control->class = I2C_CLASS_SPD;
1584 control->dev.parent = &adev->pdev->dev;
1585#endif
1586 control->algo = &aldebaran_i2c_algo;
1587 snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
1588 control->quirks = &aldebaran_i2c_control_quirks;
1589 i2c_set_adapdata(control, smu_i2c);
1590
1591 res = i2c_add_adapter(control);
1592 if (res) {
1593 DRM_ERROR("Failed to register hw i2c, err: %d\n", res)__drm_err("Failed to register hw i2c, err: %d\n", res);
1594 goto Out_err;
1595 }
1596
1597 adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
1598 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
1599
1600 return 0;
1601Out_err:
 /* NOTE(review): mirrors upstream; del on a never-added adapter is benign */
1602 i2c_del_adapter(control);
1603
1604 return res;
1605}
1606
/*
 * aldebaran_i2c_control_fini() - tear down every SMU i2c adapter and clear
 * the RAS/FRU EEPROM bus pointers.
 */
1607static void aldebaran_i2c_control_fini(struct smu_context *smu)
1608{
1609 struct amdgpu_device *adev = smu->adev;
1610 int i;
1611
1612 for (i = 0; i < MAX_SMU_I2C_BUSES2; i++) {
1613 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
1614 struct i2c_adapter *control = &smu_i2c->adapter;
1615
1616 i2c_del_adapter(control);
1617 }
1618 adev->pm.ras_eeprom_i2c_bus = NULL((void *)0);
1619 adev->pm.fru_eeprom_i2c_bus = NULL((void *)0);
1620}
1621
/*
 * aldebaran_get_unique_id() - assemble the 64-bit unique device id from the
 * two METRICS_UNIQUE_ID_* halves and derive the serial string if unset.
 * On a metrics read failure the missing half simply stays 0 (both goto
 * targets fall through to the same assignment).
 */
1622static void aldebaran_get_unique_id(struct smu_context *smu)
1623{
1624 struct amdgpu_device *adev = smu->adev;
1625 uint32_t upper32 = 0, lower32 = 0;
1626
1627 if (aldebaran_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_UPPER32, &upper32))
1628 goto out;
1629 if (aldebaran_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_LOWER32, &lower32))
1630 goto out;
1631
1632out:
1633 adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
1634 if (adev->serial[0] == '\0')
1635 snprintf(adev->serial, sizeof(adev->serial), "%016llx", adev->unique_id);
1636}
1637
/* BACO (Bus Active, Chip Off) is not available on Aldebaran. */
1638static bool_Bool aldebaran_is_baco_supported(struct smu_context *smu)
1639{
1640 /* aldebaran is not support baco */
1641
1642 return false0;
1643}
1644
/*
 * aldebaran_set_df_cstate() - forward a Data Fabric C-state control request
 * to the SMU, except while resetting or suspending, where it is a no-op.
 */
1645static int aldebaran_set_df_cstate(struct smu_context *smu,
1646 enum pp_df_cstate state)
1647{
1648 struct amdgpu_device *adev = smu->adev;
1649
1650 /*
1651 * Aldebaran does not need the cstate disablement
1652 * prerequisite for gpu reset.
1653 */
1654 if (amdgpu_in_reset(adev) || adev->in_suspend)
1655 return 0;
1656
1657 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL((void *)0));
1658}
1659
/*
 * aldebaran_allow_xgmi_power_down() - toggle XGMI (GMI) link power-down.
 * Parameter: 0 = allow power down, 1 = keep powered (hence "en ? 0 : 1").
 */
1660static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool_Bool en)
1661{
1662 struct amdgpu_device *adev = smu->adev;
1663
1664 /* The message only works on master die and NACK will be sent
1665 back for other dies, only send it on master die */
1666 if (!adev->smuio.funcs->get_socket_id(adev) &&
1667 !adev->smuio.funcs->get_die_id(adev))
1668 return smu_cmn_send_smc_msg_with_param(smu,
1669 SMU_MSG_GmiPwrDnControl,
1670 en ? 0 : 1,
1671 NULL((void *)0));
1672 else
1673 return 0;
1674}
1675
/*
 * Throttler-status bit -> human-readable label mapping used by the thermal
 * throttling log below.
 */
1676static const struct throttling_logging_label {
1677 uint32_t feature_mask;
1678 const char *label;
1679} logging_label[] = {
1680 {(1U << THROTTLER_TEMP_GPU_BIT6), "GPU"},
1681 {(1U << THROTTLER_TEMP_MEM_BIT7), "HBM"},
1682 {(1U << THROTTLER_TEMP_VR_GFX_BIT11), "VR of GFX rail"},
1683 {(1U << THROTTLER_TEMP_VR_MEM_BIT13), "VR of HBM rail"},
1684 {(1U << THROTTLER_TEMP_VR_SOC_BIT12), "VR of SOC rail"},
1685};
/*
 * aldebaran_log_thermal_throttling_event() - read THROTTLER_STATUS, build a
 * human-readable list of the throttling rails/sensors from logging_label[],
 * log a warning and forward the independent throttler status as a KFD SMI
 * event.
 *
 * NOTE(review): clang's static analyzer flags "Value stored to 'adev'
 * during its initialization is never read" here.  That is because on
 * OpenBSD dev_err()/dev_warn() expand to printf() and drop the
 * 'adev->dev' argument, so the local is only textually referenced.  The
 * store is harmless; it is kept to stay in sync with the upstream Linux
 * source, where dev_err()/dev_warn() really consume adev->dev.
 */
1686static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
1687{
1688 int ret;
1689 int throttler_idx, throtting_events = 0, buf_idx = 0;
1690 struct amdgpu_device *adev = smu->adev;
 Value stored to 'adev' during its initialization is never read
1691 uint32_t throttler_status;
1692 char log_buf[256];
1693
1694 ret = aldebaran_get_smu_metrics_data(smu,
1695 METRICS_THROTTLER_STATUS,
1696 &throttler_status);
1697 if (ret)
1698 return;
1699
1700 memset(log_buf, 0, sizeof(log_buf))__builtin_memset((log_buf), (0), (sizeof(log_buf)));
1701 for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label)(sizeof((logging_label)) / sizeof((logging_label)[0]));
1702 throttler_idx++) {
1703 if (throttler_status & logging_label[throttler_idx].feature_mask) {
1704 throtting_events++;
1705 buf_idx += snprintf(log_buf + buf_idx,
1706 sizeof(log_buf) - buf_idx,
1707 "%s%s",
1708 throtting_events > 1 ? " and " : "",
1709 logging_label[throttler_idx].label);
 /* snprintf's would-have-written return value signals truncation */
1710 if (buf_idx >= sizeof(log_buf)) {
1711 dev_err(adev->dev, "buffer overflow!\n")printf("drm:pid%d:%s *ERROR* " "buffer overflow!\n", ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})->ci_curproc->p_p->ps_pid, __func__)
;
1712 log_buf[sizeof(log_buf) - 1] = '\0';
1713 break;
1714 }
1715 }
1716 }
1717
1718 dev_warn(adev->dev, "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",printf("drm:pid%d:%s *WARNING* " "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , log_buf
)
1719 log_buf)printf("drm:pid%d:%s *WARNING* " "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , log_buf
)
;
1720 kgd2kfd_smi_event_throttle(smu->adev->kfd.dev,
1721 smu_cmn_get_indep_throttler_status(throttler_status,
1722 aldebaran_throttler_map));
1723}
1724
/*
 * aldebaran_get_current_pcie_link_speed() - report link speed, preferring
 * the ESM (Extended Speed Mode) rate encoded in PCIE_ESM_CTRL when ESM is
 * active; otherwise fall back to the generic smu_v13_0 query.
 */
1725static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu)
1726{
1727 struct amdgpu_device *adev = smu->adev;
1728 uint32_t esm_ctrl;
1729
1730 /* TODO: confirm this on real target */
1731 esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL)adev->pcie_rreg(adev, (0x111003D0));
1732 if ((esm_ctrl >> 15) & 0x1FFFF)
1733 return (((esm_ctrl >> 8) & 0x3F) + 128);
1734
1735 return smu_v13_0_get_current_pcie_link_speed(smu);
1736}
1737
/*
 * aldebaran_get_gpu_metrics() - fill the cached gpu_metrics_v1_3 table from
 * a fresh SmuMetrics_t snapshot and hand the caller a pointer to it.
 * Returns sizeof(struct gpu_metrics_v1_3) on success or a negative errno.
 * Power/energy fields are only trusted from the primary die; secondary
 * dies report zero.
 */
1738static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
1739 void **table)
1740{
1741 struct smu_table_context *smu_table = &smu->smu_table;
1742 struct gpu_metrics_v1_3 *gpu_metrics =
1743 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
1744 SmuMetrics_t metrics;
1745 int i, ret = 0;
1746
1747 ret = smu_cmn_get_metrics_table(smu,
1748 &metrics,
1749 true1);
1750 if (ret)
1751 return ret;
1752
1753 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
1754
1755 gpu_metrics->temperature_edge = metrics.TemperatureEdge;
1756 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
1757 gpu_metrics->temperature_mem = metrics.TemperatureHBM;
1758 gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
1759 gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
1760 gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;
1761
1762 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
1763 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
1764 gpu_metrics->average_mm_activity = 0;
1765
1766 /* Valid power data is available only from primary die */
1767 if (aldebaran_is_primary(smu)) {
1768 gpu_metrics->average_socket_power = metrics.AverageSocketPower;
1769 gpu_metrics->energy_accumulator =
1770 (uint64_t)metrics.EnergyAcc64bitHigh << 32 |
1771 metrics.EnergyAcc64bitLow;
1772 } else {
1773 gpu_metrics->average_socket_power = 0;
1774 gpu_metrics->energy_accumulator = 0;
1775 }
1776
1777 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
1778 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
1779 gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
1780 gpu_metrics->average_vclk0_frequency = 0;
1781 gpu_metrics->average_dclk0_frequency = 0;
1782
1783 gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
1784 gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
1785 gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
1786 gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
1787 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
1788
1789 gpu_metrics->throttle_status = metrics.ThrottlerStatus;
1790 gpu_metrics->indep_throttle_status =
1791 smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
1792 aldebaran_throttler_map);
1793
1794 gpu_metrics->current_fan_speed = 0;
1795
1796 gpu_metrics->pcie_link_width =
1797 smu_v13_0_get_current_pcie_link_width(smu);
1798 gpu_metrics->pcie_link_speed =
1799 aldebaran_get_current_pcie_link_speed(smu);
1800
1801 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
1802
1803 gpu_metrics->gfx_activity_acc = metrics.GfxBusyAcc;
1804 gpu_metrics->mem_activity_acc = metrics.DramBusyAcc;
1805
1806 for (i = 0; i < NUM_HBM_INSTANCES4; i++)
1807 gpu_metrics->temperature_hbm[i] = metrics.TemperatureAllHBM[i];
1808
1809 gpu_metrics->firmware_timestamp = ((uint64_t)metrics.TimeStampHigh << 32) |
1810 metrics.TimeStampLow;
1811
1812 *table = (void *)gpu_metrics;
1813
1814 return sizeof(struct gpu_metrics_v1_3);
1815}
1816
/*
 * aldebaran_check_ecc_table_support() - map the running SMU firmware
 * version to an ECC table version (1 or 2), or -EOPNOTSUPP when the FW is
 * too old.  A failed version query does not early-return, but smu_version
 * stays 0xff and falls into the "< SUPPORT_ECCTABLE" branch, so the result
 * is still -EOPNOTSUPP.
 */
1817static int aldebaran_check_ecc_table_support(struct smu_context *smu,
1818 int *ecctable_version)
1819{
1820 uint32_t if_version = 0xff, smu_version = 0xff;
1821 int ret = 0;
1822
1823 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
1824 if (ret) {
1825 /* return not support if failed get smu_version */
1826 ret = -EOPNOTSUPP45;
1827 }
1828
1829 if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION0x00442a00)
1830 ret = -EOPNOTSUPP45;
1831 else if (smu_version >= SUPPORT_ECCTABLE_SMU_VERSION0x00442a00 &&
1832 smu_version < SUPPORT_ECCTABLE_V2_SMU_VERSION0x00443700)
1833 *ecctable_version = 1;
1834 else
1835 *ecctable_version = 2;
1836
1837 return ret;
1838}
1839
/*
 * aldebaran_get_ecc_info() - export the SMU's per-channel ECC counters into
 * the caller-supplied umc_ecc_info.  The SMU table layout depends on the
 * FW-derived table version: v1 lacks mca_ceumc_addr, v2 provides it and
 * sets record_ce_addr_supported.  Returns 0 or a negative errno.
 */
1840static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
1841 void *table)
1842{
1843 struct smu_table_context *smu_table = &smu->smu_table;
1844 EccInfoTable_t *ecc_table = NULL((void *)0);
1845 struct ecc_info_per_ch *ecc_info_per_channel = NULL((void *)0);
1846 int i, ret = 0;
1847 int table_version = 0;
1848 struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
1849
1850 ret = aldebaran_check_ecc_table_support(smu, &table_version);
1851 if (ret)
1852 return ret;
1853
1854 ret = smu_cmn_update_table(smu,
1855 SMU_TABLE_ECCINFO,
1856 0,
1857 smu_table->ecc_table,
1858 false0);
1859 if (ret) {
1860 dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n")do { } while(0);
1861 return ret;
1862 }
1863
1864 ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
1865
1866 if (table_version == 1) {
1867 for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM32; i++) {
1868 ecc_info_per_channel = &(eccinfo->ecc[i]);
1869 ecc_info_per_channel->ce_count_lo_chip =
1870 ecc_table->EccInfo[i].ce_count_lo_chip;
1871 ecc_info_per_channel->ce_count_hi_chip =
1872 ecc_table->EccInfo[i].ce_count_hi_chip;
1873 ecc_info_per_channel->mca_umc_status =
1874 ecc_table->EccInfo[i].mca_umc_status;
1875 ecc_info_per_channel->mca_umc_addr =
1876 ecc_table->EccInfo[i].mca_umc_addr;
1877 }
1878 } else if (table_version == 2) {
1879 for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM32; i++) {
1880 ecc_info_per_channel = &(eccinfo->ecc[i]);
1881 ecc_info_per_channel->ce_count_lo_chip =
1882 ecc_table->EccInfo_V2[i].ce_count_lo_chip;
1883 ecc_info_per_channel->ce_count_hi_chip =
1884 ecc_table->EccInfo_V2[i].ce_count_hi_chip;
1885 ecc_info_per_channel->mca_umc_status =
1886 ecc_table->EccInfo_V2[i].mca_umc_status;
1887 ecc_info_per_channel->mca_umc_addr =
1888 ecc_table->EccInfo_V2[i].mca_umc_addr;
1889 ecc_info_per_channel->mca_ceumc_addr =
1890 ecc_table->EccInfo_V2[i].mca_ceumc_addr;
1891 }
1892 eccinfo->record_ce_addr_supported = 1;
1893 }
1894
1895 return ret;
1896}
1897
/*
 * aldebaran_mode1_reset() - trigger a mode-1 GPU reset.  Pre-68.07 PMFW
 * uses the legacy Mode1Reset message; newer FW uses GfxDeviceDriverReset
 * with SMU_RESET_MODE_1 and, from 68.44.0, a fatal-error flag in bit 16
 * when RAS recovery is in progress.  Sleeps to let the reset complete.
 */
1898static int aldebaran_mode1_reset(struct smu_context *smu)
1899{
1900 u32 smu_version, fatal_err, param;
1901 int ret = 0;
1902 struct amdgpu_device *adev = smu->adev;
1903 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1904
1905 fatal_err = 0;
1906 param = SMU_RESET_MODE_1;
1907
1908 /*
1909 * PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07
1910 */
1911 smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
1912 if (smu_version < 0x00440700) {
1913 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL((void *)0));
1914 }
1915 else {
1916 /* fatal error triggered by ras, PMFW supports the flag
1917 from 68.44.0 */
1918 if ((smu_version >= 0x00442c00) && ras &&
1919 atomic_read(&ras->in_recovery)({ typeof(*(&ras->in_recovery)) __tmp = *(volatile typeof
(*(&ras->in_recovery)) *)&(*(&ras->in_recovery
)); membar_datadep_consumer(); __tmp; })
)
1920 fatal_err = 1;
1921
1922 param |= (fatal_err << 16);
1923 ret = smu_cmn_send_smc_msg_with_param(smu,
1924 SMU_MSG_GfxDeviceDriverReset, param, NULL((void *)0));
1925 }
1926
1927 if (!ret)
1928 drm_msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS)mdelay(500);
1929
1930 return ret;
1931}
1932
/*
 * aldebaran_mode2_reset() - trigger a mode-2 (soft) GPU reset, supported
 * from PMFW 0x00441400.  Sends GfxDeviceDriverReset(SMU_RESET_MODE_2)
 * without waiting, restores PCI config space after an FLR-like delay, then
 * polls (up to 10 retries on -ETIME) for the firmware ACK.  A response of
 * 1 means success.  The whole sequence runs under message_lock.
 */
1933static int aldebaran_mode2_reset(struct smu_context *smu)
1934{
1935 u32 smu_version;
1936 int ret = 0, index;
1937 struct amdgpu_device *adev = smu->adev;
1938 int timeout = 10;
1939
1940 smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
1941
1942 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
1943 SMU_MSG_GfxDeviceDriverReset);
1944
1945 mutex_lock(&smu->message_lock)rw_enter_write(&smu->message_lock);
1946 if (smu_version >= 0x00441400) {
1947 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
1948 /* This is similar to FLR, wait till max FLR timeout */
1949 drm_msleep(100)mdelay(100);
1950 dev_dbg(smu->adev->dev, "restore config space...\n")do { } while(0);
1951 /* Restore the config space saved during init */
1952 amdgpu_device_load_pci_state(adev->pdev);
1953
1954 dev_dbg(smu->adev->dev, "wait for reset ack\n")do { } while(0);
1955 while (ret == -ETIME60 && timeout) {
1956 ret = smu_cmn_wait_for_response(smu);
1957 /* Wait a bit more time for getting ACK */
1958 if (ret == -ETIME60) {
1959 --timeout;
1960 usleep_range(500, 1000);
1961 continue;
1962 }
1963
1964 if (ret != 1) {
1965 dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n",printf("drm:pid%d:%s *ERROR* " "failed to send mode2 message \tparam: 0x%08x response %#x\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , SMU_RESET_MODE_2
, ret)
1966 SMU_RESET_MODE_2, ret)printf("drm:pid%d:%s *ERROR* " "failed to send mode2 message \tparam: 0x%08x response %#x\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , SMU_RESET_MODE_2
, ret)
;
1967 goto out;
1968 }
1969 }
1970
1971 } else {
1972 dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",printf("drm:pid%d:%s *ERROR* " "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , smu_version
)
1973 smu_version)printf("drm:pid%d:%s *ERROR* " "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , smu_version
)
;
1974 }
1975
 /* normalize the firmware's "1 = ACK" into a 0 success code */
1976 if (ret == 1)
1977 ret = 0;
1978out:
1979 mutex_unlock(&smu->message_lock)rw_exit_write(&smu->message_lock);
1980
1981 return ret;
1982}
1983
/* Enable/disable heavy Secondary Bus Reset handling in the PMFW. */
1984static int aldebaran_smu_handle_passthrough_sbr(struct smu_context *smu, bool_Bool enable)
1985{
1986 int ret = 0;
1987 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_HeavySBR, enable ? 1 : 0, NULL((void *)0));
1988
1989 return ret;
1990}
1991
/*
 * aldebaran_is_mode1_reset_supported() - mode-1 reset capability query.
 * The PMFW-version and PSP-liveness checks are compiled out (#if 0) in
 * this port; mode-1 reset is reported unconditionally supported.
 */
1992static bool_Bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
1993{
1994#if 0
1995 struct amdgpu_device *adev = smu->adev;
1996 u32 smu_version;
1997 uint32_t val;
1998 /**
1999 * PM FW version support mode1 reset from 68.07
2000 */
2001 smu_cmn_get_smc_version(smu, NULL((void *)0), &smu_version);
2002 if ((smu_version < 0x00440700))
2003 return false0;
2004 /**
2005 * mode1 reset relies on PSP, so we should check if
2006 * PSP is alive.
2007 */
2008 val = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81)((((adev)->virt.caps & (1 << 2)) && adev
->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported
) ? amdgpu_sriov_rreg(adev, adev->reg_offset[MP0_HWIP][0][
0] + 0x0091, 0, MP0_HWIP) : amdgpu_device_rreg(adev, (adev->
reg_offset[MP0_HWIP][0][0] + 0x0091), 0))
;
2009
2010 return val != 0x0;
2011#endif
2012 return true1;
2013}
2014
/* Mode-2 (soft) reset is always available on Aldebaran. */
2015static bool_Bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
2016{
2017 return true1;
2018}
2019
/*
 * aldebaran_set_mp1_state() - only the UNLOAD transition is forwarded to
 * the common handler; every other MP1 state change is a silent no-op.
 */
2020static int aldebaran_set_mp1_state(struct smu_context *smu,
2021 enum pp_mp1_state mp1_state)
2022{
2023 switch (mp1_state) {
2024 case PP_MP1_STATE_UNLOAD:
2025 return smu_cmn_set_mp1_state(smu, mp1_state);
2026 default:
2027 return 0;
2028 }
2029}
2030
/*
 * aldebaran_smu_send_hbm_bad_page_num() - report the count of retired HBM
 * pages to the SMU (SetNumBadHbmPagesRetired); logs on failure.
 */
2031static int aldebaran_smu_send_hbm_bad_page_num(struct smu_context *smu,
2032 uint32_t size)
2033{
2034 int ret = 0;
2035
2036 /* message SMU to update the bad page number on SMUBUS */
2037 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL((void *)0));
2038 if (ret)
2039 dev_err(smu->adev->dev, "[%s] failed to message SMU to update HBM bad pages number\n",printf("drm:pid%d:%s *ERROR* " "[%s] failed to message SMU to update HBM bad pages number\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
2040 __func__)printf("drm:pid%d:%s *ERROR* " "[%s] failed to message SMU to update HBM bad pages number\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
;
2041
2042 return ret;
2043}
2044
/*
 * aldebaran_check_bad_channel_info_support() - return 0 only when the PMFW
 * is new enough (>= 0x00443300) to take bad-channel info; otherwise
 * -EOPNOTSUPP.  As with the ECC check above, a failed version query leaves
 * smu_version at 0xff, which also yields -EOPNOTSUPP.
 */
2045static int aldebaran_check_bad_channel_info_support(struct smu_context *smu)
2046{
2047 uint32_t if_version = 0xff, smu_version = 0xff;
2048 int ret = 0;
2049
2050 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
2051 if (ret) {
2052 /* return not support if failed get smu_version */
2053 ret = -EOPNOTSUPP45;
2054 }
2055
2056 if (smu_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION0x00443300)
2057 ret = -EOPNOTSUPP45;
2058
2059 return ret;
2060}
2061
/*
 * aldebaran_send_hbm_bad_channel_flag() - forward per-channel retired-page
 * flags to the SMU after confirming the firmware supports the message.
 */
2062static int aldebaran_send_hbm_bad_channel_flag(struct smu_context *smu,
2063 uint32_t size)
2064{
2065 int ret = 0;
2066
2067 ret = aldebaran_check_bad_channel_info_support(smu);
2068 if (ret)
2069 return ret;
2070
2071 /* message SMU to update the bad channel info on SMUBUS */
2072 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetBadHBMPagesRetiredFlagsPerChannel, size, NULL((void *)0));
2073 if (ret)
2074 dev_err(smu->adev->dev, "[%s] failed to message SMU to update HBM bad channel info\n",printf("drm:pid%d:%s *ERROR* " "[%s] failed to message SMU to update HBM bad channel info\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
2075 __func__)printf("drm:pid%d:%s *ERROR* " "[%s] failed to message SMU to update HBM bad channel info\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , __func__
)
;
2076
2077 return ret;
2078}
2079
/*
 * Dispatch table binding the generic swSMU pptable interface to the
 * aldebaran (SMU v13) implementations. Entries prefixed "aldebaran_"
 * are ASIC-specific; "smu_v13_0_" entries are shared across the SMU13
 * family and "smu_cmn_" entries are common swSMU helpers.
 */
static const struct pptable_funcs aldebaran_ppt_funcs = {
	/* init dpm */
	.get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
	/* dpm/clk tables */
	.set_default_dpm_table = aldebaran_set_default_dpm_table,
	.populate_umd_state_clk = aldebaran_populate_umd_state_clk,
	.get_thermal_temperature_range = aldebaran_get_thermal_temperature_range,
	.print_clk_levels = aldebaran_print_clk_levels,
	.force_clk_levels = aldebaran_force_clk_levels,
	.read_sensor = aldebaran_read_sensor,
	.set_performance_level = aldebaran_set_performance_level,
	.get_power_limit = aldebaran_get_power_limit,
	.is_dpm_running = aldebaran_is_dpm_running,
	.get_unique_id = aldebaran_get_unique_id,
	/* firmware load/teardown (shared SMU13 paths) */
	.init_microcode = smu_v13_0_init_microcode,
	.load_microcode = smu_v13_0_load_microcode,
	.fini_microcode = smu_v13_0_fini_microcode,
	.init_smc_tables = aldebaran_init_smc_tables,
	.fini_smc_tables = smu_v13_0_fini_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_check_fw_status,
	/* pptable related */
	.setup_pptable = aldebaran_setup_pptable,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.check_fw_version = smu_v13_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
	.system_features_control = aldebaran_system_features_control,
	/* messaging and feature-mask plumbing (common helpers) */
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
	.set_power_limit = aldebaran_set_power_limit,
	.init_max_sustainable_clocks = smu_v13_0_init_max_sustainable_clocks,
	/* thermal/irq/platform hooks */
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
	.register_irq_handler = smu_v13_0_register_irq_handler,
	.set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme,
	.get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc,
	.baco_is_support= aldebaran_is_baco_supported,
	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
	.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
	.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
	.set_df_cstate = aldebaran_set_df_cstate,
	.allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
	.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.get_gpu_metrics = aldebaran_get_gpu_metrics,
	/* reset support (mode1/mode2/BACO queried above) */
	.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
	.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
	.smu_handle_passthrough_sbr = aldebaran_smu_handle_passthrough_sbr,
	.mode1_reset = aldebaran_mode1_reset,
	.set_mp1_state = aldebaran_set_mp1_state,
	.mode2_reset = aldebaran_mode2_reset,
	.wait_for_event = smu_v13_0_wait_for_event,
	/* i2c and RAS/ECC reporting */
	.i2c_init = aldebaran_i2c_control_init,
	.i2c_fini = aldebaran_i2c_control_fini,
	.send_hbm_bad_pages_num = aldebaran_smu_send_hbm_bad_page_num,
	.get_ecc_info = aldebaran_get_ecc_info,
	.send_hbm_bad_channel_flag = aldebaran_send_hbm_bad_channel_flag,
};
2147
2148void aldebaran_set_ppt_funcs(struct smu_context *smu)
2149{
2150 smu->ppt_funcs = &aldebaran_ppt_funcs;
2151 smu->message_map = aldebaran_message_map;
2152 smu->clock_map = aldebaran_clk_map;
2153 smu->feature_map = aldebaran_feature_mask_map;
2154 smu->table_map = aldebaran_table_map;
2155 smu_v13_0_set_smu_mailbox_registers(smu);
2156}