Bug Summary

File: dev/pci/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
Warning: line 1542, column 3
Value stored to 'result' is never read
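
This finding comes from the dead-store checker (-analyzer-checker=deadcode in the invocation below): it fires when a value is assigned to a variable and that value is then overwritten, or goes out of scope, without ever being read. As a minimal, self-contained sketch of the pattern the checker flags (placeholder functions, not code from this driver):

    static int step_a(void) { return 1; }
    static int step_b(void) { return 2; }

    static int example(void)
    {
        int result;

        result = step_a();  /* analyzer: "Value stored to 'result' is never read" */
        result = step_b();  /* the first store is overwritten before any read */
        return result;
    }

The usual remedy is either to act on the first value (for example, return early on error) or to drop the redundant assignment; which one applies at line 1542 depends on code outside this excerpt.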

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name tonga_smumgr.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc 
-fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "pp_debug.h"
24#include <linux/types.h>
25#include <linux/kernel.h>
26#include <linux/pci.h>
27#include <linux/slab.h>
28#include <linux/gfp.h>
29
30#include "smumgr.h"
31#include "tonga_smumgr.h"
32#include "smu_ucode_xfer_vi.h"
33#include "tonga_ppsmc.h"
34#include "smu/smu_7_1_2_d.h"
35#include "smu/smu_7_1_2_sh_mask.h"
36#include "cgs_common.h"
37#include "smu7_smumgr.h"
38
39#include "smu7_dyn_defaults.h"
40
41#include "smu7_hwmgr.h"
42#include "hardwaremanager.h"
43#include "ppatomctrl.h"
44
45#include "atombios.h"
46
47#include "pppcielanes.h"
48#include "pp_endian.h"
49
50#include "gmc/gmc_8_1_d.h"
51#include "gmc/gmc_8_1_sh_mask.h"
52
53#include "bif/bif_5_0_d.h"
54#include "bif/bif_5_0_sh_mask.h"
55
56#include "dce/dce_10_0_d.h"
57#include "dce/dce_10_0_sh_mask.h"
58
59#define POWERTUNE_DEFAULT_SET_MAX1 1
60#define MC_CG_ARB_FREQ_F10x0b 0x0b
61#define VDDC_VDDCI_DELTA200 200
62
63
64static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX1] = {
65/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
66 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
67 */
68 {1, 0xF, 0xFD, 0x19,
69 5, 45, 0, 0xB0000,
70 {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
71 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
72 {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
73 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
74 },
75};
76
77/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
78static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
79 {600, 1050, 3, 0},
80 {600, 1050, 6, 1}
81};
82
83/* [FF, SS] type, [] 4 voltage ranges,
84 * and [Floor Freq, Boundary Freq, VID min , VID max]
85 */
86static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
87 { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
88 { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
89};
90
91/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
92static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
93 {0, 1, 3, 2, 4, 5},
94 {0, 2, 4, 5, 6, 5}
95};
96
97static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr)
98{
99 int result;
100
101 /* Assert reset */
102 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000000,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000000))) & ~0x1) | (0x1 &
((1) << 0x0)))))
103 SMC_SYSCON_RESET_CNTL, rst_reg, 1)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000000,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000000))) & ~0x1) | (0x1 &
((1) << 0x0)))))
;
104
105 result = smu7_upload_smu_firmware_image(hwmgr);
106 if (result)
107 return result;
108
109 /* Clear status */
110 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xe0003088,0))
111 ixSMU_STATUS, 0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xe0003088,0))
;
112
113 /* Enable clock */
114 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000004,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000004))) & ~0x1) | (0x1 &
((0) << 0x0)))))
115 SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000004,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000004))) & ~0x1) | (0x1 &
((0) << 0x0)))))
;
116
117 /* De-assert reset */
118 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000000,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000000))) & ~0x1) | (0x1 &
((0) << 0x0)))))
119 SMC_SYSCON_RESET_CNTL, rst_reg, 0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000000,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000000))) & ~0x1) | (0x1 &
((0) << 0x0)))))
;
120
121 /* Set SMU Auto Start */
122 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xe00030b8,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0xe00030b8))) & ~0x80000000) | (0x80000000
& ((1) << 0x1f)))))
123 SMU_INPUT_DATA, AUTO_START, 1)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xe00030b8,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0xe00030b8))) & ~0x80000000) | (0x80000000
& ((1) << 0x1f)))))
;
124
125 /* Clear firmware interrupt enable flag */
126 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x3f800,0))
127 ixFIRMWARE_FLAGS, 0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x3f800,0))
;
128
129 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,phm_wait_on_indirect_register(hwmgr, 0x1AC, 0xc0000004, (1) <<
0x10, 0x10000)
130 RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1)phm_wait_on_indirect_register(hwmgr, 0x1AC, 0xc0000004, (1) <<
0x10, 0x10000)
;
131
132 /**
133 * Call Test SMU message with 0x20000 offset to trigger SMU start
134 */
135 smu7_send_msg_to_smc_offset(hwmgr);
136
137 /* Wait for done bit to be set */
138 PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,phm_wait_for_indirect_register_unequal(hwmgr, 0x1AC, 0xe0003088
, (0) << 0x0, 0x1)
139 SMU_STATUS, SMU_DONE, 0)phm_wait_for_indirect_register_unequal(hwmgr, 0x1AC, 0xe0003088
, (0) << 0x0, 0x1)
;
140
141 /* Check pass/failed indicator */
142 if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xe0003088))) & 0x2) >>
0x1)
143 CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xe0003088))) & 0x2) >>
0x1)
) {
144 pr_err("SMU Firmware start failed\n")printk("\0013" "amdgpu: " "SMU Firmware start failed\n");
145 return -EINVAL22;
146 }
147
148 /* Wait for firmware to initialize */
149 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,phm_wait_on_indirect_register(hwmgr, 0x1AC, 0x3f800, (1) <<
0x0, 0x1)
150 FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1)phm_wait_on_indirect_register(hwmgr, 0x1AC, 0x3f800, (1) <<
0x0, 0x1)
;
151
152 return 0;
153}
154
155static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr)
156{
157 int result = 0;
158
159 /* wait for smc boot up */
160 PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,phm_wait_for_indirect_register_unequal(hwmgr, 0x1AC, 0xc0000004
, (0) << 0x7, 0x80)
161 RCU_UC_EVENTS, boot_seq_done, 0)phm_wait_for_indirect_register_unequal(hwmgr, 0x1AC, 0xc0000004
, (0) << 0x7, 0x80)
;
162
163 /*Clear firmware interrupt enable flag*/
164 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x3f800,0))
165 ixFIRMWARE_FLAGS, 0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x3f800,0))
;
166
167
168 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000000,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000000))) & ~0x1) | (0x1 &
((1) << 0x0)))))
169 SMC_SYSCON_RESET_CNTL, rst_reg, 1)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000000,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000000))) & ~0x1) | (0x1 &
((1) << 0x0)))))
;
170
171 result = smu7_upload_smu_firmware_image(hwmgr);
172
173 if (result != 0)
174 return result;
175
176 /* Set smc instruct start point at 0x0 */
177 smu7_program_jump_on_start(hwmgr);
178
179
180 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000004,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000004))) & ~0x1) | (0x1 &
((0) << 0x0)))))
181 SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000004,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000004))) & ~0x1) | (0x1 &
((0) << 0x0)))))
;
182
183 /*De-assert reset*/
184 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000000,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000000))) & ~0x1) | (0x1 &
((0) << 0x0)))))
185 SMC_SYSCON_RESET_CNTL, rst_reg, 0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x80000000,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0x80000000))) & ~0x1) | (0x1 &
((0) << 0x0)))))
;
186
187 /* Wait for firmware to initialize */
188 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,phm_wait_on_indirect_register(hwmgr, 0x1AC, 0x3f800, (1) <<
0x0, 0x1)
189 FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1)phm_wait_on_indirect_register(hwmgr, 0x1AC, 0x3f800, (1) <<
0x0, 0x1)
;
190
191 return result;
192}
193
194static int tonga_start_smu(struct pp_hwmgr *hwmgr)
195{
196 struct tonga_smumgr *priv = hwmgr->smu_backend;
197 int result;
198
199 /* Only start SMC if SMC RAM is not running */
200 if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
201 /*Check if SMU is running in protected mode*/
202 if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xe00030a4))) & 0x10000
) >> 0x10)
203 SMU_FIRMWARE, SMU_MODE)((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xe00030a4))) & 0x10000
) >> 0x10)
) {
204 result = tonga_start_in_non_protection_mode(hwmgr);
205 if (result)
206 return result;
207 } else {
208 result = tonga_start_in_protection_mode(hwmgr);
209 if (result)
210 return result;
211 }
212 }
213
214 /* Setup SoftRegsStart here to visit the register UcodeLoadStatus
215 * to check fw loading state
216 */
217 smu7_read_smc_sram_dword(hwmgr,
218 SMU72_FIRMWARE_HEADER_LOCATION0x20000 +
219 offsetof(SMU72_Firmware_Header, SoftRegisters)__builtin_offsetof(SMU72_Firmware_Header, SoftRegisters),
220 &(priv->smu7_data.soft_regs_start), 0x40000);
221
222 result = smu7_request_smu_load_fw(hwmgr);
223
224 return result;
225}
226
227static int tonga_smu_init(struct pp_hwmgr *hwmgr)
228{
229 struct tonga_smumgr *tonga_priv = NULL((void *)0);
230
231 tonga_priv = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL(0x0001 | 0x0004));
232 if (tonga_priv == NULL((void *)0))
233 return -ENOMEM12;
234
235 hwmgr->smu_backend = tonga_priv;
236
237 if (smu7_init(hwmgr)) {
238 kfree(tonga_priv);
239 return -EINVAL22;
240 }
241
242 return 0;
243}
244
245
246static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
247 phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
248 uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
249{
250 uint32_t i = 0;
251 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
252 struct phm_ppt_v1_information *pptable_info =
253 (struct phm_ppt_v1_information *)(hwmgr->pptable);
254
255 /* clock - voltage dependency table is empty table */
256 if (allowed_clock_voltage_table->count == 0)
257 return -EINVAL22;
258
259 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
260 /* find first sclk bigger than request */
261 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
262 voltage->VddGfx = phm_get_voltage_index(
263 pptable_info->vddgfx_lookup_table,
264 allowed_clock_voltage_table->entries[i].vddgfx);
265 voltage->Vddc = phm_get_voltage_index(
266 pptable_info->vddc_lookup_table,
267 allowed_clock_voltage_table->entries[i].vddc);
268
269 if (allowed_clock_voltage_table->entries[i].vddci)
270 voltage->Vddci =
271 phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
272 else
273 voltage->Vddci =
274 phm_get_voltage_id(&data->vddci_voltage_table,
275 allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA200);
276
277
278 if (allowed_clock_voltage_table->entries[i].mvdd)
279 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
280
281 voltage->Phases = 1;
282 return 0;
283 }
284 }
285
286 /* sclk is bigger than max sclk in the dependence table */
287 voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
288 allowed_clock_voltage_table->entries[i-1].vddgfx);
289 voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
290 allowed_clock_voltage_table->entries[i-1].vddc);
291
292 if (allowed_clock_voltage_table->entries[i-1].vddci)
293 voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
294 allowed_clock_voltage_table->entries[i-1].vddci);
295
296 if (allowed_clock_voltage_table->entries[i-1].mvdd)
297 *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
298
299 return 0;
300}
301
302static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
303 SMU72_Discrete_DpmTable *table)
304{
305 unsigned int count;
306 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
307
308 if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->voltage_control) {
309 table->VddcLevelCount = data->vddc_voltage_table.count;
310 for (count = 0; count < table->VddcLevelCount; count++) {
311 table->VddcTable[count] =
312 PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE)(__uint16_t)(__builtin_constant_p(data->vddc_voltage_table
.entries[count].value * 4) ? (__uint16_t)(((__uint16_t)(data->
vddc_voltage_table.entries[count].value * 4) & 0xffU) <<
8 | ((__uint16_t)(data->vddc_voltage_table.entries[count]
.value * 4) & 0xff00U) >> 8) : __swap16md(data->
vddc_voltage_table.entries[count].value * 4))
;
313 }
314 CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount)((table->VddcLevelCount) = (__uint32_t)(__builtin_constant_p
(table->VddcLevelCount) ? (__uint32_t)(((__uint32_t)(table
->VddcLevelCount) & 0xff) << 24 | ((__uint32_t)(
table->VddcLevelCount) & 0xff00) << 8 | ((__uint32_t
)(table->VddcLevelCount) & 0xff0000) >> 8 | ((__uint32_t
)(table->VddcLevelCount) & 0xff000000) >> 24) : __swap32md
(table->VddcLevelCount)))
;
315 }
316 return 0;
317}
318
319static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
320 SMU72_Discrete_DpmTable *table)
321{
322 unsigned int count;
323 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
324
325 if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->vdd_gfx_control) {
326 table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
327 for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
328 table->VddGfxTable[count] =
329 PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE)(__uint16_t)(__builtin_constant_p(data->vddgfx_voltage_table
.entries[count].value * 4) ? (__uint16_t)(((__uint16_t)(data->
vddgfx_voltage_table.entries[count].value * 4) & 0xffU) <<
8 | ((__uint16_t)(data->vddgfx_voltage_table.entries[count
].value * 4) & 0xff00U) >> 8) : __swap16md(data->
vddgfx_voltage_table.entries[count].value * 4))
;
330 }
331 CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount)((table->VddGfxLevelCount) = (__uint32_t)(__builtin_constant_p
(table->VddGfxLevelCount) ? (__uint32_t)(((__uint32_t)(table
->VddGfxLevelCount) & 0xff) << 24 | ((__uint32_t
)(table->VddGfxLevelCount) & 0xff00) << 8 | ((__uint32_t
)(table->VddGfxLevelCount) & 0xff0000) >> 8 | ((
__uint32_t)(table->VddGfxLevelCount) & 0xff000000) >>
24) : __swap32md(table->VddGfxLevelCount)))
;
332 }
333 return 0;
334}
335
336static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
337 SMU72_Discrete_DpmTable *table)
338{
339 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
340 uint32_t count;
341
342 table->VddciLevelCount = data->vddci_voltage_table.count;
343 for (count = 0; count < table->VddciLevelCount; count++) {
344 if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->vddci_control) {
345 table->VddciTable[count] =
346 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE)(__uint16_t)(__builtin_constant_p(data->vddci_voltage_table
.entries[count].value * 4) ? (__uint16_t)(((__uint16_t)(data->
vddci_voltage_table.entries[count].value * 4) & 0xffU) <<
8 | ((__uint16_t)(data->vddci_voltage_table.entries[count
].value * 4) & 0xff00U) >> 8) : __swap16md(data->
vddci_voltage_table.entries[count].value * 4))
;
347 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO0x1 == data->vddci_control) {
348 table->SmioTable1.Pattern[count].Voltage =
349 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE)(__uint16_t)(__builtin_constant_p(data->vddci_voltage_table
.entries[count].value * 4) ? (__uint16_t)(((__uint16_t)(data->
vddci_voltage_table.entries[count].value * 4) & 0xffU) <<
8 | ((__uint16_t)(data->vddci_voltage_table.entries[count
].value * 4) & 0xff00U) >> 8) : __swap16md(data->
vddci_voltage_table.entries[count].value * 4))
;
350 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
351 table->SmioTable1.Pattern[count].Smio =
352 (uint8_t) count;
353 table->Smio[count] |=
354 data->vddci_voltage_table.entries[count].smio_low;
355 table->VddciTable[count] =
356 PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE)(__uint16_t)(__builtin_constant_p(data->vddci_voltage_table
.entries[count].value * 4) ? (__uint16_t)(((__uint16_t)(data->
vddci_voltage_table.entries[count].value * 4) & 0xffU) <<
8 | ((__uint16_t)(data->vddci_voltage_table.entries[count
].value * 4) & 0xff00U) >> 8) : __swap16md(data->
vddci_voltage_table.entries[count].value * 4))
;
357 }
358 }
359
360 table->SmioMask1 = data->vddci_voltage_table.mask_low;
361 CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount)((table->VddciLevelCount) = (__uint32_t)(__builtin_constant_p
(table->VddciLevelCount) ? (__uint32_t)(((__uint32_t)(table
->VddciLevelCount) & 0xff) << 24 | ((__uint32_t)
(table->VddciLevelCount) & 0xff00) << 8 | ((__uint32_t
)(table->VddciLevelCount) & 0xff0000) >> 8 | ((__uint32_t
)(table->VddciLevelCount) & 0xff000000) >> 24) :
__swap32md(table->VddciLevelCount)))
;
362
363 return 0;
364}
365
366static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
367 SMU72_Discrete_DpmTable *table)
368{
369 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
370 uint32_t count;
371
372 if (SMU7_VOLTAGE_CONTROL_BY_GPIO0x1 == data->mvdd_control) {
373 table->MvddLevelCount = data->mvdd_voltage_table.count;
374 for (count = 0; count < table->MvddLevelCount; count++) {
375 table->SmioTable2.Pattern[count].Voltage =
376 PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE)(__uint16_t)(__builtin_constant_p(data->mvdd_voltage_table
.entries[count].value * 4) ? (__uint16_t)(((__uint16_t)(data->
mvdd_voltage_table.entries[count].value * 4) & 0xffU) <<
8 | ((__uint16_t)(data->mvdd_voltage_table.entries[count]
.value * 4) & 0xff00U) >> 8) : __swap16md(data->
mvdd_voltage_table.entries[count].value * 4))
;
377 /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
378 table->SmioTable2.Pattern[count].Smio =
379 (uint8_t) count;
380 table->Smio[count] |=
381 data->mvdd_voltage_table.entries[count].smio_low;
382 }
383 table->SmioMask2 = data->mvdd_voltage_table.mask_low;
384
385 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount)((table->MvddLevelCount) = (__uint32_t)(__builtin_constant_p
(table->MvddLevelCount) ? (__uint32_t)(((__uint32_t)(table
->MvddLevelCount) & 0xff) << 24 | ((__uint32_t)(
table->MvddLevelCount) & 0xff00) << 8 | ((__uint32_t
)(table->MvddLevelCount) & 0xff0000) >> 8 | ((__uint32_t
)(table->MvddLevelCount) & 0xff000000) >> 24) : __swap32md
(table->MvddLevelCount)))
;
386 }
387
388 return 0;
389}
390
391static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
392 SMU72_Discrete_DpmTable *table)
393{
394 uint32_t count;
395 uint8_t index = 0;
396 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
397 struct phm_ppt_v1_information *pptable_info =
398 (struct phm_ppt_v1_information *)(hwmgr->pptable);
399 struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
400 pptable_info->vddgfx_lookup_table;
401 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
402 pptable_info->vddc_lookup_table;
403
404 /* table is already swapped, so in order to use the value from it
405 * we need to swap it back.
406 */
407 uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount)(__uint32_t)(__builtin_constant_p(table->VddcLevelCount) ?
(__uint32_t)(((__uint32_t)(table->VddcLevelCount) & 0xff
) << 24 | ((__uint32_t)(table->VddcLevelCount) &
0xff00) << 8 | ((__uint32_t)(table->VddcLevelCount)
& 0xff0000) >> 8 | ((__uint32_t)(table->VddcLevelCount
) & 0xff000000) >> 24) : __swap32md(table->VddcLevelCount
))
;
408 uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount)(__uint32_t)(__builtin_constant_p(table->VddGfxLevelCount)
? (__uint32_t)(((__uint32_t)(table->VddGfxLevelCount) &
0xff) << 24 | ((__uint32_t)(table->VddGfxLevelCount
) & 0xff00) << 8 | ((__uint32_t)(table->VddGfxLevelCount
) & 0xff0000) >> 8 | ((__uint32_t)(table->VddGfxLevelCount
) & 0xff000000) >> 24) : __swap32md(table->VddGfxLevelCount
))
;
409
410 for (count = 0; count < vddc_level_count; count++) {
411 /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
412 index = phm_get_voltage_index(vddc_lookup_table,
413 data->vddc_voltage_table.entries[count].value);
414 table->BapmVddcVidLoSidd[count] =
415 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
416 table->BapmVddcVidHiSidd[count] =
417 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
418 table->BapmVddcVidHiSidd2[count] =
419 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
420 }
421
422 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID20x2) {
423 /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
424 for (count = 0; count < vddgfx_level_count; count++) {
425 index = phm_get_voltage_index(vddgfx_lookup_table,
426 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
427 table->BapmVddGfxVidHiSidd2[count] =
428 convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
429 }
430 } else {
431 for (count = 0; count < vddc_level_count; count++) {
432 index = phm_get_voltage_index(vddc_lookup_table,
433 data->vddc_voltage_table.entries[count].value);
434 table->BapmVddGfxVidLoSidd[count] =
435 convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
436 table->BapmVddGfxVidHiSidd[count] =
437 convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
438 table->BapmVddGfxVidHiSidd2[count] =
439 convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
440 }
441 }
442
443 return 0;
444}
445
446static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
447 SMU72_Discrete_DpmTable *table)
448{
449 int result;
450
451 result = tonga_populate_smc_vddc_table(hwmgr, table);
452 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate VDDC voltage table to SMC"
); return -22; } } while (0)
453 "can not populate VDDC voltage table to SMC",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate VDDC voltage table to SMC"
); return -22; } } while (0)
454 return -EINVAL)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate VDDC voltage table to SMC"
); return -22; } } while (0)
;
455
456 result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
457 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate VDDCI voltage table to SMC"
); return -22; } } while (0)
458 "can not populate VDDCI voltage table to SMC",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate VDDCI voltage table to SMC"
); return -22; } } while (0)
459 return -EINVAL)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate VDDCI voltage table to SMC"
); return -22; } } while (0)
;
460
461 result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
462 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate VDDGFX voltage table to SMC"
); return -22; } } while (0)
463 "can not populate VDDGFX voltage table to SMC",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate VDDGFX voltage table to SMC"
); return -22; } } while (0)
464 return -EINVAL)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate VDDGFX voltage table to SMC"
); return -22; } } while (0)
;
465
466 result = tonga_populate_smc_mvdd_table(hwmgr, table);
467 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate MVDD voltage table to SMC"
); return -22; } } while (0)
468 "can not populate MVDD voltage table to SMC",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate MVDD voltage table to SMC"
); return -22; } } while (0)
469 return -EINVAL)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate MVDD voltage table to SMC"
); return -22; } } while (0)
;
470
471 result = tonga_populate_cac_tables(hwmgr, table);
472 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate CAC voltage tables to SMC"
); return -22; } } while (0)
473 "can not populate CAC voltage tables to SMC",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate CAC voltage tables to SMC"
); return -22; } } while (0)
474 return -EINVAL)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate CAC voltage tables to SMC"
); return -22; } } while (0)
;
475
476 return 0;
477}
478
479static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
480 struct SMU72_Discrete_Ulv *state)
481{
482 struct phm_ppt_v1_information *table_info =
483 (struct phm_ppt_v1_information *)(hwmgr->pptable);
484
485 state->CcPwrDynRm = 0;
486 state->CcPwrDynRm1 = 0;
487
488 state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
489 state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
490 VOLTAGE_VID_OFFSET_SCALE2100 / VOLTAGE_VID_OFFSET_SCALE1625);
491
492 state->VddcPhase = 1;
493
494 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm)((state->CcPwrDynRm) = (__uint32_t)(__builtin_constant_p(state
->CcPwrDynRm) ? (__uint32_t)(((__uint32_t)(state->CcPwrDynRm
) & 0xff) << 24 | ((__uint32_t)(state->CcPwrDynRm
) & 0xff00) << 8 | ((__uint32_t)(state->CcPwrDynRm
) & 0xff0000) >> 8 | ((__uint32_t)(state->CcPwrDynRm
) & 0xff000000) >> 24) : __swap32md(state->CcPwrDynRm
)))
;
495 CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1)((state->CcPwrDynRm1) = (__uint32_t)(__builtin_constant_p(
state->CcPwrDynRm1) ? (__uint32_t)(((__uint32_t)(state->
CcPwrDynRm1) & 0xff) << 24 | ((__uint32_t)(state->
CcPwrDynRm1) & 0xff00) << 8 | ((__uint32_t)(state->
CcPwrDynRm1) & 0xff0000) >> 8 | ((__uint32_t)(state
->CcPwrDynRm1) & 0xff000000) >> 24) : __swap32md
(state->CcPwrDynRm1)))
;
496 CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset)((state->VddcOffset) = (__uint16_t)(__builtin_constant_p(state
->VddcOffset) ? (__uint16_t)(((__uint16_t)(state->VddcOffset
) & 0xffU) << 8 | ((__uint16_t)(state->VddcOffset
) & 0xff00U) >> 8) : __swap16md(state->VddcOffset
)))
;
497
498 return 0;
499}
500
501static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
502 struct SMU72_Discrete_DpmTable *table)
503{
504 return tonga_populate_ulv_level(hwmgr, &table->Ulv);
505}
506
507static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
508{
509 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
510 struct smu7_dpm_table *dpm_table = &data->dpm_table;
511 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
512 uint32_t i;
513
514 /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
515 for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
516 table->LinkLevel[i].PcieGenSpeed =
517 (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
518 table->LinkLevel[i].PcieLaneCount =
519 (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
520 table->LinkLevel[i].EnabledForActivity =
521 1;
522 table->LinkLevel[i].SPC =
523 (uint8_t)(data->pcie_spc_cap & 0xff);
524 table->LinkLevel[i].DownThreshold =
525 PP_HOST_TO_SMC_UL(5)(__uint32_t)(__builtin_constant_p(5) ? (__uint32_t)(((__uint32_t
)(5) & 0xff) << 24 | ((__uint32_t)(5) & 0xff00)
<< 8 | ((__uint32_t)(5) & 0xff0000) >> 8 | (
(__uint32_t)(5) & 0xff000000) >> 24) : __swap32md(5
))
;
526 table->LinkLevel[i].UpThreshold =
527 PP_HOST_TO_SMC_UL(30)(__uint32_t)(__builtin_constant_p(30) ? (__uint32_t)(((__uint32_t
)(30) & 0xff) << 24 | ((__uint32_t)(30) & 0xff00
) << 8 | ((__uint32_t)(30) & 0xff0000) >> 8 |
((__uint32_t)(30) & 0xff000000) >> 24) : __swap32md
(30))
;
528 }
529
530 smu_data->smc_state_table.LinkLevelCount =
531 (uint8_t)dpm_table->pcie_speed_table.count;
532 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
533 phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
534
535 return 0;
536}
537
538static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
539 uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
540{
541 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
542 pp_atomctrl_clock_dividers_vi dividers;
543 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
544 uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
545 uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
546 uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
547 uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
548 uint32_t reference_clock;
549 uint32_t reference_divider;
550 uint32_t fbdiv;
551 int result;
552
553 /* get the engine clock dividers for this clock value*/
554 result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
555
556 PP_ASSERT_WITH_CODE(result == 0,do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS."
); return result; } } while (0)
557 "Error retrieving Engine Clock dividers from VBIOS.", return result)do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS."
); return result; } } while (0)
;
558
559 /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
560 reference_clock = atomctrl_get_reference_clock(hwmgr);
561
562 reference_divider = 1 + dividers.uc_pll_ref_div;
563
564 /* low 14 bits is fraction and high 12 bits is divider*/
565 fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
566
567 /* SPLL_FUNC_CNTL setup*/
568 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,(((spll_func_cntl) & ~0x7e0) | (0x7e0 & ((dividers.uc_pll_ref_div
) << 0x5)))
569 CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div)(((spll_func_cntl) & ~0x7e0) | (0x7e0 & ((dividers.uc_pll_ref_div
) << 0x5)))
;
570 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,(((spll_func_cntl) & ~0x7f00000) | (0x7f00000 & ((dividers
.uc_pll_post_div) << 0x14)))
571 CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div)(((spll_func_cntl) & ~0x7f00000) | (0x7f00000 & ((dividers
.uc_pll_post_div) << 0x14)))
;
572
573 /* SPLL_FUNC_CNTL_3 setup*/
574 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,(((spll_func_cntl_3) & ~0x3ffffff) | (0x3ffffff & ((fbdiv
) << 0x0)))
575 CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv)(((spll_func_cntl_3) & ~0x3ffffff) | (0x3ffffff & ((fbdiv
) << 0x0)))
;
576
577 /* set to use fractional accumulation*/
578 spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,(((spll_func_cntl_3) & ~0x10000000) | (0x10000000 & (
(1) << 0x1c)))
579 CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1)(((spll_func_cntl_3) & ~0x10000000) | (0x10000000 & (
(1) << 0x1c)))
;
580
581 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
582 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
583 pp_atomctrl_internal_ss_info ss_info;
584
585 uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
586 if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
587 /*
588 * ss_info.speed_spectrum_percentage -- in unit of 0.01%
589 * ss_info.speed_spectrum_rate -- in unit of khz
590 */
591 /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
592 uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
593
594 /* clkv = 2 * D * fbdiv / NS */
595 uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
596
597 cg_spll_spread_spectrum =
598 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS)(((cg_spll_spread_spectrum) & ~0xfff0) | (0xfff0 & ((
clkS) << 0x4)))
;
599 cg_spll_spread_spectrum =
600 PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1)(((cg_spll_spread_spectrum) & ~0x1) | (0x1 & ((1) <<
0x0)))
;
601 cg_spll_spread_spectrum_2 =
602 PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV)(((cg_spll_spread_spectrum_2) & ~0x3ffffff) | (0x3ffffff &
((clkV) << 0x0)))
;
603 }
604 }
605
606 sclk->SclkFrequency = engine_clock;
607 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
608 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
609 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
610 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
611 sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
612
613 return 0;
614}
615
616static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
617 uint32_t engine_clock,
618 SMU72_Discrete_GraphicsLevel *graphic_level)
619{
620 int result;
621 uint32_t mvdd;
622 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
623 struct phm_ppt_v1_information *pptable_info =
624 (struct phm_ppt_v1_information *)(hwmgr->pptable);
625 phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL((void *)0);
626
627 result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
628
629 if (hwmgr->od_enabled)
630 vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_sclk;
631 else
632 vdd_dep_table = pptable_info->vdd_dep_on_sclk;
633
634 /* populate graphics levels*/
635 result = tonga_get_dependency_volt_by_clk(hwmgr,
636 vdd_dep_table, engine_clock,
637 &graphic_level->MinVoltage, &mvdd);
638 PP_ASSERT_WITH_CODE((!result),do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find VDDC voltage value for VDDC "
"engine clock dependency table"); return result; } } while (
0)
639 "can not find VDDC voltage value for VDDC "do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find VDDC voltage value for VDDC "
"engine clock dependency table"); return result; } } while (
0)
640 "engine clock dependency table", return result)do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find VDDC voltage value for VDDC "
"engine clock dependency table"); return result; } } while (
0)
;
641
642 /* SCLK frequency in units of 10KHz*/
643 graphic_level->SclkFrequency = engine_clock;
644 /* Indicates maximum activity level for this performance level. 50% for now*/
645 graphic_level->ActivityLevel = data->current_profile_setting.sclk_activity;
646
647 graphic_level->CcPwrDynRm = 0;
648 graphic_level->CcPwrDynRm1 = 0;
649 /* this level can be used if activity is high enough.*/
650 graphic_level->EnabledForActivity = 0;
651 /* this level can be used for throttling.*/
652 graphic_level->EnabledForThrottle = 1;
653 graphic_level->UpHyst = data->current_profile_setting.sclk_up_hyst;
654 graphic_level->DownHyst = data->current_profile_setting.sclk_down_hyst;
655 graphic_level->VoltageDownHyst = 0;
656 graphic_level->PowerThrottle = 0;
657
658 data->display_timing.min_clock_in_sr =
659 hwmgr->display_config->min_core_set_clock_in_sr;
660
661 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
662 PHM_PlatformCaps_SclkDeepSleep))
663 graphic_level->DeepSleepDivId =
664 smu7_get_sleep_divider_id_from_clock(engine_clock,
665 data->display_timing.min_clock_in_sr);
666
667 /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
668 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW0;
669
670 if (!result) {
671 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
672 /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
673 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency)((graphic_level->SclkFrequency) = (__uint32_t)(__builtin_constant_p
(graphic_level->SclkFrequency) ? (__uint32_t)(((__uint32_t
)(graphic_level->SclkFrequency) & 0xff) << 24 | (
(__uint32_t)(graphic_level->SclkFrequency) & 0xff00) <<
8 | ((__uint32_t)(graphic_level->SclkFrequency) & 0xff0000
) >> 8 | ((__uint32_t)(graphic_level->SclkFrequency)
& 0xff000000) >> 24) : __swap32md(graphic_level->
SclkFrequency)))
;
674 CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel)((graphic_level->ActivityLevel) = (__uint16_t)(__builtin_constant_p
(graphic_level->ActivityLevel) ? (__uint16_t)(((__uint16_t
)(graphic_level->ActivityLevel) & 0xffU) << 8 | (
(__uint16_t)(graphic_level->ActivityLevel) & 0xff00U) >>
8) : __swap16md(graphic_level->ActivityLevel)))
;
675 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3)((graphic_level->CgSpllFuncCntl3) = (__uint32_t)(__builtin_constant_p
(graphic_level->CgSpllFuncCntl3) ? (__uint32_t)(((__uint32_t
)(graphic_level->CgSpllFuncCntl3) & 0xff) << 24 |
((__uint32_t)(graphic_level->CgSpllFuncCntl3) & 0xff00
) << 8 | ((__uint32_t)(graphic_level->CgSpllFuncCntl3
) & 0xff0000) >> 8 | ((__uint32_t)(graphic_level->
CgSpllFuncCntl3) & 0xff000000) >> 24) : __swap32md(
graphic_level->CgSpllFuncCntl3)))
;
676 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4)((graphic_level->CgSpllFuncCntl4) = (__uint32_t)(__builtin_constant_p
(graphic_level->CgSpllFuncCntl4) ? (__uint32_t)(((__uint32_t
)(graphic_level->CgSpllFuncCntl4) & 0xff) << 24 |
((__uint32_t)(graphic_level->CgSpllFuncCntl4) & 0xff00
) << 8 | ((__uint32_t)(graphic_level->CgSpllFuncCntl4
) & 0xff0000) >> 8 | ((__uint32_t)(graphic_level->
CgSpllFuncCntl4) & 0xff000000) >> 24) : __swap32md(
graphic_level->CgSpllFuncCntl4)))
;
677 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum)((graphic_level->SpllSpreadSpectrum) = (__uint32_t)(__builtin_constant_p
(graphic_level->SpllSpreadSpectrum) ? (__uint32_t)(((__uint32_t
)(graphic_level->SpllSpreadSpectrum) & 0xff) << 24
| ((__uint32_t)(graphic_level->SpllSpreadSpectrum) & 0xff00
) << 8 | ((__uint32_t)(graphic_level->SpllSpreadSpectrum
) & 0xff0000) >> 8 | ((__uint32_t)(graphic_level->
SpllSpreadSpectrum) & 0xff000000) >> 24) : __swap32md
(graphic_level->SpllSpreadSpectrum)))
;
678 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2)((graphic_level->SpllSpreadSpectrum2) = (__uint32_t)(__builtin_constant_p
(graphic_level->SpllSpreadSpectrum2) ? (__uint32_t)(((__uint32_t
)(graphic_level->SpllSpreadSpectrum2) & 0xff) <<
24 | ((__uint32_t)(graphic_level->SpllSpreadSpectrum2) &
0xff00) << 8 | ((__uint32_t)(graphic_level->SpllSpreadSpectrum2
) & 0xff0000) >> 8 | ((__uint32_t)(graphic_level->
SpllSpreadSpectrum2) & 0xff000000) >> 24) : __swap32md
(graphic_level->SpllSpreadSpectrum2)))
;
679 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm)((graphic_level->CcPwrDynRm) = (__uint32_t)(__builtin_constant_p
(graphic_level->CcPwrDynRm) ? (__uint32_t)(((__uint32_t)(graphic_level
->CcPwrDynRm) & 0xff) << 24 | ((__uint32_t)(graphic_level
->CcPwrDynRm) & 0xff00) << 8 | ((__uint32_t)(graphic_level
->CcPwrDynRm) & 0xff0000) >> 8 | ((__uint32_t)(graphic_level
->CcPwrDynRm) & 0xff000000) >> 24) : __swap32md(
graphic_level->CcPwrDynRm)))
;
680 CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1)((graphic_level->CcPwrDynRm1) = (__uint32_t)(__builtin_constant_p
(graphic_level->CcPwrDynRm1) ? (__uint32_t)(((__uint32_t)(
graphic_level->CcPwrDynRm1) & 0xff) << 24 | ((__uint32_t
)(graphic_level->CcPwrDynRm1) & 0xff00) << 8 | (
(__uint32_t)(graphic_level->CcPwrDynRm1) & 0xff0000) >>
8 | ((__uint32_t)(graphic_level->CcPwrDynRm1) & 0xff000000
) >> 24) : __swap32md(graphic_level->CcPwrDynRm1)))
;
681 }
682
683 return result;
684}
685
686static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
687{
688 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
689 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
690 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
691 struct smu7_dpm_table *dpm_table = &data->dpm_table;
692 struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
693 uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
694 uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
695 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel)__builtin_offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
696
697 uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
698 SMU72_MAX_LEVELS_GRAPHICS8;
699
700 SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
701
702 uint32_t i, max_entry;
703 uint8_t highest_pcie_level_enabled = 0;
704 uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
705 uint8_t count = 0;
706 int result = 0;
707
708 memset(levels, 0x00, level_array_size)__builtin_memset((levels), (0x00), (level_array_size));
709
710 for (i = 0; i < dpm_table->sclk_table.count; i++) {
711 result = tonga_populate_single_graphic_level(hwmgr,
712 dpm_table->sclk_table.dpm_levels[i].value,
713 &(smu_data->smc_state_table.GraphicsLevel[i]));
714 if (result != 0)
715 return result;
716
717 /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
718 if (i > 1)
719 smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
720 }
721
722 /* Only enable level 0 for now. */
723 smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
724
725 /* set highest level watermark to high */
726 if (dpm_table->sclk_table.count > 1)
727 smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
728 PPSMC_DISPLAY_WATERMARK_HIGH1;
729
730 smu_data->smc_state_table.GraphicsDpmLevelCount =
731 (uint8_t)dpm_table->sclk_table.count;
732 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
733 phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
734
735 if (pcie_table != NULL((void *)0)) {
736 PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),do { if (!((pcie_entry_count >= 1))) { printk("\0014" "amdgpu: "
"%s\n", "There must be 1 or more PCIE levels defined in PPTable."
); return -22; } } while (0)
737 "There must be 1 or more PCIE levels defined in PPTable.",do { if (!((pcie_entry_count >= 1))) { printk("\0014" "amdgpu: "
"%s\n", "There must be 1 or more PCIE levels defined in PPTable."
); return -22; } } while (0)
738 return -EINVAL)do { if (!((pcie_entry_count >= 1))) { printk("\0014" "amdgpu: "
"%s\n", "There must be 1 or more PCIE levels defined in PPTable."
); return -22; } } while (0)
;
739 max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
740 for (i = 0; i < dpm_table->sclk_table.count; i++) {
741 smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
742 (uint8_t) ((i < max_entry) ? i : max_entry);
743 }
744 } else {
745 if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
746 pr_err("Pcie Dpm Enablemask is 0 !")printk("\0013" "amdgpu: " "Pcie Dpm Enablemask is 0 !");
747
748 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
749 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
750 (1<<(highest_pcie_level_enabled+1))) != 0)) {
751 highest_pcie_level_enabled++;
752 }
753
754 while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
755 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
756 (1<<lowest_pcie_level_enabled)) == 0)) {
757 lowest_pcie_level_enabled++;
758 }
759
760 while ((count < highest_pcie_level_enabled) &&
761 ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
762 (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
763 count++;
764 }
765 mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
766 (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
767
768
769 /* set pcieDpmLevel to highest_pcie_level_enabled*/
770 for (i = 2; i < dpm_table->sclk_table.count; i++)
771 smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
772
773 /* set pcieDpmLevel to lowest_pcie_level_enabled*/
774 smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
775
776 /* set pcieDpmLevel to mid_pcie_level_enabled*/
777 smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
778 }
779 /* level count will send to smc once at init smc table and never change*/
780 result = smu7_copy_bytes_to_smc(hwmgr, level_array_address,
781 (uint8_t *)levels, (uint32_t)level_array_size,
782 SMC_RAM_END0x40000);
783
784 return result;
785}
786
787static int tonga_calculate_mclk_params(
788 struct pp_hwmgr *hwmgr,
789 uint32_t memory_clock,
790 SMU72_Discrete_MemoryLevel *mclk,
791 bool_Bool strobe_mode,
792 bool_Bool dllStateOn
793 )
794{
795 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
796
797 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
798 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
799 uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
800 uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
801 uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
802 uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
803 uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
804 uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
805 uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
806
807 pp_atomctrl_memory_clock_param mpll_param;
808 int result;
809
810 result = atomctrl_get_memory_pll_dividers_si(hwmgr,
811 memory_clock, &mpll_param, strobe_mode);
812 PP_ASSERT_WITH_CODE(do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Memory Clock Parameters from VBIOS."
); return result; } } while (0)
813 !result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Memory Clock Parameters from VBIOS."
); return result; } } while (0)
814 "Error retrieving Memory Clock Parameters from VBIOS.",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Memory Clock Parameters from VBIOS."
); return result; } } while (0)
815 return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Memory Clock Parameters from VBIOS."
); return result; } } while (0)
;
816
817 /* MPLL_FUNC_CNTL setup*/
818 mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,(((mpll_func_cntl) & ~0xff00000) | (0xff00000 & ((mpll_param
.bw_ctrl) << 0x14)))
819 mpll_param.bw_ctrl)(((mpll_func_cntl) & ~0xff00000) | (0xff00000 & ((mpll_param
.bw_ctrl) << 0x14)))
;
820
821 /* MPLL_FUNC_CNTL_1 setup*/
822 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,(((mpll_func_cntl_1) & ~0xfff0000) | (0xfff0000 & ((mpll_param
.mpll_fb_divider.cl_kf) << 0x10)))
823 MPLL_FUNC_CNTL_1, CLKF,(((mpll_func_cntl_1) & ~0xfff0000) | (0xfff0000 & ((mpll_param
.mpll_fb_divider.cl_kf) << 0x10)))
824 mpll_param.mpll_fb_divider.cl_kf)(((mpll_func_cntl_1) & ~0xfff0000) | (0xfff0000 & ((mpll_param
.mpll_fb_divider.cl_kf) << 0x10)))
;
825 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,(((mpll_func_cntl_1) & ~0xfff0) | (0xfff0 & ((mpll_param
.mpll_fb_divider.clk_frac) << 0x4)))
826 MPLL_FUNC_CNTL_1, CLKFRAC,(((mpll_func_cntl_1) & ~0xfff0) | (0xfff0 & ((mpll_param
.mpll_fb_divider.clk_frac) << 0x4)))
827 mpll_param.mpll_fb_divider.clk_frac)(((mpll_func_cntl_1) & ~0xfff0) | (0xfff0 & ((mpll_param
.mpll_fb_divider.clk_frac) << 0x4)))
;
828 mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,(((mpll_func_cntl_1) & ~0x3) | (0x3 & ((mpll_param.vco_mode
) << 0x0)))
829 MPLL_FUNC_CNTL_1, VCO_MODE,(((mpll_func_cntl_1) & ~0x3) | (0x3 & ((mpll_param.vco_mode
) << 0x0)))
830 mpll_param.vco_mode)(((mpll_func_cntl_1) & ~0x3) | (0x3 & ((mpll_param.vco_mode
) << 0x0)))
;
831
832 /* MPLL_AD_FUNC_CNTL setup*/
833 mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,(((mpll_ad_func_cntl) & ~0x7) | (0x7 & ((mpll_param.mpll_post_divider
) << 0x0)))
834 MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,(((mpll_ad_func_cntl) & ~0x7) | (0x7 & ((mpll_param.mpll_post_divider
) << 0x0)))
835 mpll_param.mpll_post_divider)(((mpll_ad_func_cntl) & ~0x7) | (0x7 & ((mpll_param.mpll_post_divider
) << 0x0)))
;
836
837 if (data->is_memory_gddr5) {
838 /* MPLL_DQ_FUNC_CNTL setup*/
839 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,(((mpll_dq_func_cntl) & ~0x10) | (0x10 & ((mpll_param
.yclk_sel) << 0x4)))
840 MPLL_DQ_FUNC_CNTL, YCLK_SEL,(((mpll_dq_func_cntl) & ~0x10) | (0x10 & ((mpll_param
.yclk_sel) << 0x4)))
841 mpll_param.yclk_sel)(((mpll_dq_func_cntl) & ~0x10) | (0x10 & ((mpll_param
.yclk_sel) << 0x4)))
;
842 mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,(((mpll_dq_func_cntl) & ~0x7) | (0x7 & ((mpll_param.mpll_post_divider
) << 0x0)))
843 MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,(((mpll_dq_func_cntl) & ~0x7) | (0x7 & ((mpll_param.mpll_post_divider
) << 0x0)))
844 mpll_param.mpll_post_divider)(((mpll_dq_func_cntl) & ~0x7) | (0x7 & ((mpll_param.mpll_post_divider
) << 0x0)))
;
845 }
846
847 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
848 PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
849 /*
850 ************************************
851 Fref = Reference Frequency
852 NF = Feedback divider ratio
853 NR = Reference divider ratio
854 Fnom = Nominal VCO output frequency = Fref * NF / NR
855 Fs = Spreading Rate
856 D = Percentage down-spread / 2
857 Fint = Reference input frequency to PFD = Fref / NR
858 NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
859 CLKS = NS - 1 = ISS_STEP_NUM[11:0]
860 NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
861 CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
862 *************************************
863 */
864 pp_atomctrl_internal_ss_info ss_info;
865 uint32_t freq_nom;
866 uint32_t tmp;
867 uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
868
869 /* for GDDR5 for all modes and DDR3 */
870 if (1 == mpll_param.qdr)
871 freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
872 else
873 freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
874
875 /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
876 tmp = (freq_nom / reference_clock);
877 tmp = tmp * tmp;
878
879 if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
880 /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
881 /* ss_info.speed_spectrum_rate -- in unit of kHz */
882 /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
883 /* = reference_clock * 5 / speed_spectrum_rate */
884 uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
885
886 /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpectrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
887 /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
888 uint32_t clkv =
889 (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
890 ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
891
892 mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv)(((mpll_ss1) & ~0x3ffffff) | (0x3ffffff & ((clkv) <<
0x0)))
;
893 mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks)(((mpll_ss2) & ~0xfff) | (0xfff & ((clks) << 0x0
)))
;
894 }
895 }
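The two fields written above follow the formulas in the comment block: CLKS = reference_clock * 5 / speed_spectrum_rate, and CLKV folds the 65536 scale factor, the /2, the *4 and the /100000 into the constant 131/100 (65536 / 2 * 4 / 100000 ~= 131 / 100). A minimal standalone sketch of the same arithmetic follows; the real inputs come from atomctrl_get_mpll_reference_clock() and atomctrl_get_memory_clock_spread_spectrum(), and every number below is an illustrative assumption, not data from this driver.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reference_clock = 2700;   /* assumed reference clock        */
	uint32_t freq_nom = 96000;         /* assumed nominal VCO frequency  */
	uint32_t percentage = 200;         /* assumed 2.00%, in 0.01% units  */
	uint32_t rate = 300;               /* assumed spreading rate in kHz  */
	uint32_t tmp, clks, clkv;

	tmp = freq_nom / reference_clock;  /* (Fnom / Fref), reference_divider = 1 */
	tmp = tmp * tmp;                   /* (Fnom / Fref) ^ 2                    */

	clks = reference_clock * 5 / rate;
	clkv = (uint32_t)((((131 * percentage * rate) / 100) * tmp) / freq_nom);

	printf("CLKS = %u, CLKV = %u\n", clks, clkv);  /* prints CLKS = 45, CLKV = 1002 */
	return 0;
}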
896
897 /* MCLK_PWRMGT_CNTL setup */
898 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,(((mclk_pwrmgt_cntl) & ~0x1f) | (0x1f & ((mpll_param.
dll_speed) << 0x0)))
899 MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed)(((mclk_pwrmgt_cntl) & ~0x1f) | (0x1f & ((mpll_param.
dll_speed) << 0x0)))
;
900 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,(((mclk_pwrmgt_cntl) & ~0x100) | (0x100 & ((dllStateOn
) << 0x8)))
901 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn)(((mclk_pwrmgt_cntl) & ~0x100) | (0x100 & ((dllStateOn
) << 0x8)))
;
902 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,(((mclk_pwrmgt_cntl) & ~0x200) | (0x200 & ((dllStateOn
) << 0x9)))
903 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn)(((mclk_pwrmgt_cntl) & ~0x200) | (0x200 & ((dllStateOn
) << 0x9)))
;
904
905 /* Save the result data to the output memory level structure */
906 mclk->MclkFrequency = memory_clock;
907 mclk->MpllFuncCntl = mpll_func_cntl;
908 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
909 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
910 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
911 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
912 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
913 mclk->DllCntl = dll_cntl;
914 mclk->MpllSs1 = mpll_ss1;
915 mclk->MpllSs2 = mpll_ss2;
916
917 return 0;
918}
919
920static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
921 bool_Bool strobe_mode)
922{
923 uint8_t mc_para_index;
924
925 if (strobe_mode) {
926 if (memory_clock < 12500)
927 mc_para_index = 0x00;
928 else if (memory_clock > 47500)
929 mc_para_index = 0x0f;
930 else
931 mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
932 } else {
933 if (memory_clock < 65000)
934 mc_para_index = 0x00;
935 else if (memory_clock > 135000)
936 mc_para_index = 0x0f;
937 else
938 mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
939 }
940
941 return mc_para_index;
942}
943
944static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
945{
946 uint8_t mc_para_index;
947
948 if (memory_clock < 10000)
949 mc_para_index = 0;
950 else if (memory_clock >= 80000)
951 mc_para_index = 0x0f;
952 else
953 mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
954
955 return mc_para_index;
956}
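Both helpers above quantize the memory clock into a 4-bit index for the MC sequencer. As a worked example, assuming the 10 kHz clock units used elsewhere in this file: in strobe mode a memory_clock of 30000 sits inside the 12500..47500 window, so mc_para_index = (30000 - 10000) / 2500 = 8; for DDR3, a memory_clock of 40000 gives (40000 - 10000) / 5000 + 1 = 7.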
957
958
959static int tonga_populate_single_memory_level(
960 struct pp_hwmgr *hwmgr,
961 uint32_t memory_clock,
962 SMU72_Discrete_MemoryLevel *memory_level
963 )
964{
965 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
966 struct phm_ppt_v1_information *pptable_info =
967 (struct phm_ppt_v1_information *)(hwmgr->pptable);
968 uint32_t mclk_edc_wr_enable_threshold = 40000;
969 uint32_t mclk_stutter_mode_threshold = 30000;
970 uint32_t mclk_edc_enable_threshold = 40000;
971 uint32_t mclk_strobe_mode_threshold = 40000;
972 phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL((void *)0);
973 int result = 0;
974 bool_Bool dll_state_on;
975 uint32_t mvdd = 0;
976
977 if (hwmgr->od_enabled)
978 vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
979 else
980 vdd_dep_table = pptable_info->vdd_dep_on_mclk;
981
982 if (NULL((void *)0) != vdd_dep_table) {
983 result = tonga_get_dependency_volt_by_clk(hwmgr,
984 vdd_dep_table,
985 memory_clock,
986 &memory_level->MinVoltage, &mvdd);
987 PP_ASSERT_WITH_CODE(do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not find MinVddc voltage value from memory VDDC "
"voltage dependency table"); return result; } } while (0)
988 !result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not find MinVddc voltage value from memory VDDC "
"voltage dependency table"); return result; } } while (0)
989 "can not find MinVddc voltage value from memory VDDC "do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not find MinVddc voltage value from memory VDDC "
"voltage dependency table"); return result; } } while (0)
990 "voltage dependency table",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not find MinVddc voltage value from memory VDDC "
"voltage dependency table"); return result; } } while (0)
991 return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "can not find MinVddc voltage value from memory VDDC "
"voltage dependency table"); return result; } } while (0)
;
992 }
993
994 if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE0x0)
995 memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
996 else
997 memory_level->MinMvdd = mvdd;
998
999 memory_level->EnabledForThrottle = 1;
1000 memory_level->EnabledForActivity = 0;
1001 memory_level->UpHyst = data->current_profile_setting.mclk_up_hyst;
1002 memory_level->DownHyst = data->current_profile_setting.mclk_down_hyst;
1003 memory_level->VoltageDownHyst = 0;
1004
1005 /* Indicates maximum activity level for this performance level.*/
1006 memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
1007 memory_level->StutterEnable = 0;
1008 memory_level->StrobeEnable = 0;
1009 memory_level->EdcReadEnable = 0;
1010 memory_level->EdcWriteEnable = 0;
1011 memory_level->RttEnable = 0;
1012
1013 /* default set to low watermark. Highest level will be set to high later.*/
1014 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW0;
1015
1016 data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
1017 data->display_timing.vrefresh = hwmgr->display_config->vrefresh;
1018
1019 if ((mclk_stutter_mode_threshold != 0) &&
1020 (memory_clock <= mclk_stutter_mode_threshold) &&
1021 (!data->is_uvd_enabled)
1022 && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE)((((((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0x1b35))) & 0x1) >> 0x0)
& 0x1)
1023 && (data->display_timing.num_existing_displays <= 2)
1024 && (data->display_timing.num_existing_displays != 0))
1025 memory_level->StutterEnable = 1;
1026
1027 /* decide strobe mode*/
1028 memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
1029 (memory_clock <= mclk_strobe_mode_threshold);
1030
1031 /* decide EDC mode and memory clock ratio*/
1032 if (data->is_memory_gddr5) {
1033 memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
1034 memory_level->StrobeEnable);
1035
1036 if ((mclk_edc_enable_threshold != 0) &&
1037 (memory_clock > mclk_edc_enable_threshold)) {
1038 memory_level->EdcReadEnable = 1;
1039 }
1040
1041 if ((mclk_edc_wr_enable_threshold != 0) &&
1042 (memory_clock > mclk_edc_wr_enable_threshold)) {
1043 memory_level->EdcWriteEnable = 1;
1044 }
1045
1046 if (memory_level->StrobeEnable) {
1047 if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
1048 ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0xa99))
>> 16) & 0xf)) {
1049 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0xa95))
>> 1) & 0x1) ? 1 : 0;
1050 } else {
1051 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0xa96))
>> 1) & 0x1) ? 1 : 0;
1052 }
1053
1054 } else {
1055 dll_state_on = data->dll_default_on;
1056 }
1057 } else {
1058 memory_level->StrobeRatio =
1059 tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
1060 dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0xa95))
>> 1) & 0x1) ? 1 : 0;
1061 }
1062
1063 result = tonga_calculate_mclk_params(hwmgr,
1064 memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
1065
1066 if (!result) {
1067 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd)((memory_level->MinMvdd) = (__uint32_t)(__builtin_constant_p
(memory_level->MinMvdd) ? (__uint32_t)(((__uint32_t)(memory_level
->MinMvdd) & 0xff) << 24 | ((__uint32_t)(memory_level
->MinMvdd) & 0xff00) << 8 | ((__uint32_t)(memory_level
->MinMvdd) & 0xff0000) >> 8 | ((__uint32_t)(memory_level
->MinMvdd) & 0xff000000) >> 24) : __swap32md(memory_level
->MinMvdd)))
;
1068 /* MCLK frequency in units of 10KHz*/
1069 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency)((memory_level->MclkFrequency) = (__uint32_t)(__builtin_constant_p
(memory_level->MclkFrequency) ? (__uint32_t)(((__uint32_t)
(memory_level->MclkFrequency) & 0xff) << 24 | ((
__uint32_t)(memory_level->MclkFrequency) & 0xff00) <<
8 | ((__uint32_t)(memory_level->MclkFrequency) & 0xff0000
) >> 8 | ((__uint32_t)(memory_level->MclkFrequency) &
0xff000000) >> 24) : __swap32md(memory_level->MclkFrequency
)))
;
1070 /* Indicates maximum activity level for this performance level.*/
1071 CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel)((memory_level->ActivityLevel) = (__uint16_t)(__builtin_constant_p
(memory_level->ActivityLevel) ? (__uint16_t)(((__uint16_t)
(memory_level->ActivityLevel) & 0xffU) << 8 | ((
__uint16_t)(memory_level->ActivityLevel) & 0xff00U) >>
8) : __swap16md(memory_level->ActivityLevel)))
;
1072 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl)((memory_level->MpllFuncCntl) = (__uint32_t)(__builtin_constant_p
(memory_level->MpllFuncCntl) ? (__uint32_t)(((__uint32_t)(
memory_level->MpllFuncCntl) & 0xff) << 24 | ((__uint32_t
)(memory_level->MpllFuncCntl) & 0xff00) << 8 | (
(__uint32_t)(memory_level->MpllFuncCntl) & 0xff0000) >>
8 | ((__uint32_t)(memory_level->MpllFuncCntl) & 0xff000000
) >> 24) : __swap32md(memory_level->MpllFuncCntl)))
;
1073 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1)((memory_level->MpllFuncCntl_1) = (__uint32_t)(__builtin_constant_p
(memory_level->MpllFuncCntl_1) ? (__uint32_t)(((__uint32_t
)(memory_level->MpllFuncCntl_1) & 0xff) << 24 | (
(__uint32_t)(memory_level->MpllFuncCntl_1) & 0xff00) <<
8 | ((__uint32_t)(memory_level->MpllFuncCntl_1) & 0xff0000
) >> 8 | ((__uint32_t)(memory_level->MpllFuncCntl_1)
& 0xff000000) >> 24) : __swap32md(memory_level->
MpllFuncCntl_1)))
;
1074 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2)((memory_level->MpllFuncCntl_2) = (__uint32_t)(__builtin_constant_p
(memory_level->MpllFuncCntl_2) ? (__uint32_t)(((__uint32_t
)(memory_level->MpllFuncCntl_2) & 0xff) << 24 | (
(__uint32_t)(memory_level->MpllFuncCntl_2) & 0xff00) <<
8 | ((__uint32_t)(memory_level->MpllFuncCntl_2) & 0xff0000
) >> 8 | ((__uint32_t)(memory_level->MpllFuncCntl_2)
& 0xff000000) >> 24) : __swap32md(memory_level->
MpllFuncCntl_2)))
;
1075 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl)((memory_level->MpllAdFuncCntl) = (__uint32_t)(__builtin_constant_p
(memory_level->MpllAdFuncCntl) ? (__uint32_t)(((__uint32_t
)(memory_level->MpllAdFuncCntl) & 0xff) << 24 | (
(__uint32_t)(memory_level->MpllAdFuncCntl) & 0xff00) <<
8 | ((__uint32_t)(memory_level->MpllAdFuncCntl) & 0xff0000
) >> 8 | ((__uint32_t)(memory_level->MpllAdFuncCntl)
& 0xff000000) >> 24) : __swap32md(memory_level->
MpllAdFuncCntl)))
;
1076 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl)((memory_level->MpllDqFuncCntl) = (__uint32_t)(__builtin_constant_p
(memory_level->MpllDqFuncCntl) ? (__uint32_t)(((__uint32_t
)(memory_level->MpllDqFuncCntl) & 0xff) << 24 | (
(__uint32_t)(memory_level->MpllDqFuncCntl) & 0xff00) <<
8 | ((__uint32_t)(memory_level->MpllDqFuncCntl) & 0xff0000
) >> 8 | ((__uint32_t)(memory_level->MpllDqFuncCntl)
& 0xff000000) >> 24) : __swap32md(memory_level->
MpllDqFuncCntl)))
;
1077 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl)((memory_level->MclkPwrmgtCntl) = (__uint32_t)(__builtin_constant_p
(memory_level->MclkPwrmgtCntl) ? (__uint32_t)(((__uint32_t
)(memory_level->MclkPwrmgtCntl) & 0xff) << 24 | (
(__uint32_t)(memory_level->MclkPwrmgtCntl) & 0xff00) <<
8 | ((__uint32_t)(memory_level->MclkPwrmgtCntl) & 0xff0000
) >> 8 | ((__uint32_t)(memory_level->MclkPwrmgtCntl)
& 0xff000000) >> 24) : __swap32md(memory_level->
MclkPwrmgtCntl)))
;
1078 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl)((memory_level->DllCntl) = (__uint32_t)(__builtin_constant_p
(memory_level->DllCntl) ? (__uint32_t)(((__uint32_t)(memory_level
->DllCntl) & 0xff) << 24 | ((__uint32_t)(memory_level
->DllCntl) & 0xff00) << 8 | ((__uint32_t)(memory_level
->DllCntl) & 0xff0000) >> 8 | ((__uint32_t)(memory_level
->DllCntl) & 0xff000000) >> 24) : __swap32md(memory_level
->DllCntl)))
;
1079 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1)((memory_level->MpllSs1) = (__uint32_t)(__builtin_constant_p
(memory_level->MpllSs1) ? (__uint32_t)(((__uint32_t)(memory_level
->MpllSs1) & 0xff) << 24 | ((__uint32_t)(memory_level
->MpllSs1) & 0xff00) << 8 | ((__uint32_t)(memory_level
->MpllSs1) & 0xff0000) >> 8 | ((__uint32_t)(memory_level
->MpllSs1) & 0xff000000) >> 24) : __swap32md(memory_level
->MpllSs1)))
;
1080 CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2)((memory_level->MpllSs2) = (__uint32_t)(__builtin_constant_p
(memory_level->MpllSs2) ? (__uint32_t)(((__uint32_t)(memory_level
->MpllSs2) & 0xff) << 24 | ((__uint32_t)(memory_level
->MpllSs2) & 0xff00) << 8 | ((__uint32_t)(memory_level
->MpllSs2) & 0xff0000) >> 8 | ((__uint32_t)(memory_level
->MpllSs2) & 0xff000000) >> 24) : __swap32md(memory_level
->MpllSs2)))
;
1081 }
1082
1083 return result;
1084}
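The long CONVERT_FROM_HOST_TO_SMC_UL/_US expansions above are byte swaps: each 32- or 16-bit field is converted from host byte order to the byte order the SMC side expects (judging from the macro names) before the table is uploaded. A minimal standalone equivalent of the 32-bit case, written directly from the expansion shown above (a sketch, not the driver's macro):

static inline uint32_t host_to_smc_u32(uint32_t v)
{
	/* Same shuffle as the constant-folding branch of the expansion;
	 * the non-constant branch calls __swap32md(), a machine-dependent
	 * implementation of the same swap. */
	return ((v & 0x000000ffu) << 24) |
	       ((v & 0x0000ff00u) <<  8) |
	       ((v & 0x00ff0000u) >>  8) |
	       ((v & 0xff000000u) >> 24);
}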
1085
1086static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1087{
1088 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1089 struct tonga_smumgr *smu_data =
1090 (struct tonga_smumgr *)(hwmgr->smu_backend);
1091 struct smu7_dpm_table *dpm_table = &data->dpm_table;
1092 int result;
1093
1094 /* populate MCLK dpm table to SMU7 */
1095 uint32_t level_array_address =
1096 smu_data->smu7_data.dpm_table_start +
1097 offsetof(SMU72_Discrete_DpmTable, MemoryLevel)__builtin_offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
1098 uint32_t level_array_size =
1099 sizeof(SMU72_Discrete_MemoryLevel) *
1100 SMU72_MAX_LEVELS_MEMORY4;
1101 SMU72_Discrete_MemoryLevel *levels =
1102 smu_data->smc_state_table.MemoryLevel;
1103 uint32_t i;
1104
1105 memset(levels, 0x00, level_array_size)__builtin_memset((levels), (0x00), (level_array_size));
1106
1107 for (i = 0; i < dpm_table->mclk_table.count; i++) {
1108 PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),do { if (!((0 != dpm_table->mclk_table.dpm_levels[i].value
))) { printk("\0014" "amdgpu: " "%s\n", "can not populate memory level as memory clock is zero"
); return -22; } } while (0)
1109 "can not populate memory level as memory clock is zero",do { if (!((0 != dpm_table->mclk_table.dpm_levels[i].value
))) { printk("\0014" "amdgpu: " "%s\n", "can not populate memory level as memory clock is zero"
); return -22; } } while (0)
1110 return -EINVAL)do { if (!((0 != dpm_table->mclk_table.dpm_levels[i].value
))) { printk("\0014" "amdgpu: " "%s\n", "can not populate memory level as memory clock is zero"
); return -22; } } while (0)
;
1111 result = tonga_populate_single_memory_level(
1112 hwmgr,
1113 dpm_table->mclk_table.dpm_levels[i].value,
1114 &(smu_data->smc_state_table.MemoryLevel[i]));
1115 if (result)
1116 return result;
1117 }
1118
1119 /* Only enable level 0 for now.*/
1120 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
1121
1122 /*
1123  * In order to prevent MC activity in stutter mode from pushing DPM up,
1124  * the UVD change complements this by putting the MCLK in a higher state
1125  * by default, so that we are not affected by the up threshold or MCLK DPM latency.
1126 */
1127 smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
1128 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel)((smu_data->smc_state_table.MemoryLevel[0].ActivityLevel) =
(__uint16_t)(__builtin_constant_p(smu_data->smc_state_table
.MemoryLevel[0].ActivityLevel) ? (__uint16_t)(((__uint16_t)(smu_data
->smc_state_table.MemoryLevel[0].ActivityLevel) & 0xffU
) << 8 | ((__uint16_t)(smu_data->smc_state_table.MemoryLevel
[0].ActivityLevel) & 0xff00U) >> 8) : __swap16md(smu_data
->smc_state_table.MemoryLevel[0].ActivityLevel)))
;
1129
1130 smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
1131 data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
1132 /* set highest level watermark to high*/
1133 smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH1;
1134
1135 /* The level count is sent to the SMC once, at SMC table init, and never changes. */
1136 result = smu7_copy_bytes_to_smc(hwmgr,
1137 level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
1138 SMC_RAM_END0x40000);
1139
1140 return result;
1141}
1142
1143static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
1144 uint32_t mclk, SMIO_Pattern *smio_pattern)
1145{
1146 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1147 struct phm_ppt_v1_information *table_info =
1148 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1149 uint32_t i = 0;
1150
1151 if (SMU7_VOLTAGE_CONTROL_NONE0x0 != data->mvdd_control) {
1152 /* find the MVDD entry whose clock is at least the requested clock */
1153 for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
1154 if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
1155 /* Always round to higher voltage. */
1156 smio_pattern->Voltage =
1157 data->mvdd_voltage_table.entries[i].value;
1158 break;
1159 }
1160 }
1161
1162 PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,do { if (!(i < table_info->vdd_dep_on_mclk->count)) {
printk("\0014" "amdgpu: " "%s\n", "MVDD Voltage is outside the supported range."
); return -22; } } while (0)
1163 "MVDD Voltage is outside the supported range.",do { if (!(i < table_info->vdd_dep_on_mclk->count)) {
printk("\0014" "amdgpu: " "%s\n", "MVDD Voltage is outside the supported range."
); return -22; } } while (0)
1164 return -EINVAL)do { if (!(i < table_info->vdd_dep_on_mclk->count)) {
printk("\0014" "amdgpu: " "%s\n", "MVDD Voltage is outside the supported range."
); return -22; } } while (0)
;
1165 } else {
1166 return -EINVAL22;
1167 }
1168
1169 return 0;
1170}
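The loop above selects the first MCLK dependency entry whose clock is at least the requested mclk and uses the MVDD value at the same index, i.e. it rounds the voltage up. As an illustrative example (made-up table contents): with dependency clocks {30000, 60000, 96250} and a request of 50000, the loop stops at index 1 and smio_pattern->Voltage is taken from mvdd_voltage_table.entries[1].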
1171
1172
1173static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1174 SMU72_Discrete_DpmTable *table)
1175{
1176 int result = 0;
1177 struct tonga_smumgr *smu_data =
1178 (struct tonga_smumgr *)(hwmgr->smu_backend);
1179 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1180 struct pp_atomctrl_clock_dividers_vi dividers;
1181
1182 SMIO_Pattern voltage_level;
1183 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1184 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1185 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1186 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1187
1188 /* The ACPI state should not do DPM on DC (or ever).*/
1189 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC0x01;
1190
1191 table->ACPILevel.MinVoltage =
1192 smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
1193
1194 /* assign zero for now*/
1195 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1196
1197 /* get the engine clock dividers for this clock value*/
1198 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1199 table->ACPILevel.SclkFrequency, &dividers);
1200
1201 PP_ASSERT_WITH_CODE(result == 0,do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS."
); return result; } } while (0)
1202 "Error retrieving Engine Clock dividers from VBIOS.",do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS."
); return result; } } while (0)
1203 return result)do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS."
); return result; } } while (0)
;
1204
1205 /* divider ID for required SCLK*/
1206 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1207 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW0;
1208 table->ACPILevel.DeepSleepDivId = 0;
1209
1210 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,(((spll_func_cntl) & ~0x2) | (0x2 & ((0) << 0x1
)))
1211 SPLL_PWRON, 0)(((spll_func_cntl) & ~0x2) | (0x2 & ((0) << 0x1
)))
;
1212 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,(((spll_func_cntl) & ~0x1) | (0x1 & ((1) << 0x0
)))
1213 SPLL_RESET, 1)(((spll_func_cntl) & ~0x1) | (0x1 & ((1) << 0x0
)))
;
1214 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,(((spll_func_cntl_2) & ~0x1ff) | (0x1ff & ((4) <<
0x0)))
1215 SCLK_MUX_SEL, 4)(((spll_func_cntl_2) & ~0x1ff) | (0x1ff & ((4) <<
0x0)))
;
1216
1217 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1218 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1219 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1220 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1221 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1222 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1223 table->ACPILevel.CcPwrDynRm = 0;
1224 table->ACPILevel.CcPwrDynRm1 = 0;
1225
1226
1227 /* For various features to be enabled/disabled while this level is active.*/
1228 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags)((table->ACPILevel.Flags) = (__uint32_t)(__builtin_constant_p
(table->ACPILevel.Flags) ? (__uint32_t)(((__uint32_t)(table
->ACPILevel.Flags) & 0xff) << 24 | ((__uint32_t)
(table->ACPILevel.Flags) & 0xff00) << 8 | ((__uint32_t
)(table->ACPILevel.Flags) & 0xff0000) >> 8 | ((__uint32_t
)(table->ACPILevel.Flags) & 0xff000000) >> 24) :
__swap32md(table->ACPILevel.Flags)))
;
1229 /* SCLK frequency in units of 10KHz*/
1230 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency)((table->ACPILevel.SclkFrequency) = (__uint32_t)(__builtin_constant_p
(table->ACPILevel.SclkFrequency) ? (__uint32_t)(((__uint32_t
)(table->ACPILevel.SclkFrequency) & 0xff) << 24 |
((__uint32_t)(table->ACPILevel.SclkFrequency) & 0xff00
) << 8 | ((__uint32_t)(table->ACPILevel.SclkFrequency
) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel
.SclkFrequency) & 0xff000000) >> 24) : __swap32md(table
->ACPILevel.SclkFrequency)))
;
1231 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl)((table->ACPILevel.CgSpllFuncCntl) = (__uint32_t)(__builtin_constant_p
(table->ACPILevel.CgSpllFuncCntl) ? (__uint32_t)(((__uint32_t
)(table->ACPILevel.CgSpllFuncCntl) & 0xff) << 24
| ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl) & 0xff00
) << 8 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl
) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel
.CgSpllFuncCntl) & 0xff000000) >> 24) : __swap32md(
table->ACPILevel.CgSpllFuncCntl)))
;
1232 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2)((table->ACPILevel.CgSpllFuncCntl2) = (__uint32_t)(__builtin_constant_p
(table->ACPILevel.CgSpllFuncCntl2) ? (__uint32_t)(((__uint32_t
)(table->ACPILevel.CgSpllFuncCntl2) & 0xff) << 24
| ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl2) & 0xff00
) << 8 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl2
) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel
.CgSpllFuncCntl2) & 0xff000000) >> 24) : __swap32md
(table->ACPILevel.CgSpllFuncCntl2)))
;
1233 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3)((table->ACPILevel.CgSpllFuncCntl3) = (__uint32_t)(__builtin_constant_p
(table->ACPILevel.CgSpllFuncCntl3) ? (__uint32_t)(((__uint32_t
)(table->ACPILevel.CgSpllFuncCntl3) & 0xff) << 24
| ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl3) & 0xff00
) << 8 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl3
) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel
.CgSpllFuncCntl3) & 0xff000000) >> 24) : __swap32md
(table->ACPILevel.CgSpllFuncCntl3)))
;
1234 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4)((table->ACPILevel.CgSpllFuncCntl4) = (__uint32_t)(__builtin_constant_p
(table->ACPILevel.CgSpllFuncCntl4) ? (__uint32_t)(((__uint32_t
)(table->ACPILevel.CgSpllFuncCntl4) & 0xff) << 24
| ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl4) & 0xff00
) << 8 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl4
) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel
.CgSpllFuncCntl4) & 0xff000000) >> 24) : __swap32md
(table->ACPILevel.CgSpllFuncCntl4)))
;
1235 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum)((table->ACPILevel.SpllSpreadSpectrum) = (__uint32_t)(__builtin_constant_p
(table->ACPILevel.SpllSpreadSpectrum) ? (__uint32_t)(((__uint32_t
)(table->ACPILevel.SpllSpreadSpectrum) & 0xff) <<
24 | ((__uint32_t)(table->ACPILevel.SpllSpreadSpectrum) &
0xff00) << 8 | ((__uint32_t)(table->ACPILevel.SpllSpreadSpectrum
) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel
.SpllSpreadSpectrum) & 0xff000000) >> 24) : __swap32md
(table->ACPILevel.SpllSpreadSpectrum)))
;
1236 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2)((table->ACPILevel.SpllSpreadSpectrum2) = (__uint32_t)(__builtin_constant_p
(table->ACPILevel.SpllSpreadSpectrum2) ? (__uint32_t)(((__uint32_t
)(table->ACPILevel.SpllSpreadSpectrum2) & 0xff) <<
24 | ((__uint32_t)(table->ACPILevel.SpllSpreadSpectrum2) &
0xff00) << 8 | ((__uint32_t)(table->ACPILevel.SpllSpreadSpectrum2
) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel
.SpllSpreadSpectrum2) & 0xff000000) >> 24) : __swap32md
(table->ACPILevel.SpllSpreadSpectrum2)))
;
1237 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm)((table->ACPILevel.CcPwrDynRm) = (__uint32_t)(__builtin_constant_p
(table->ACPILevel.CcPwrDynRm) ? (__uint32_t)(((__uint32_t)
(table->ACPILevel.CcPwrDynRm) & 0xff) << 24 | ((
__uint32_t)(table->ACPILevel.CcPwrDynRm) & 0xff00) <<
8 | ((__uint32_t)(table->ACPILevel.CcPwrDynRm) & 0xff0000
) >> 8 | ((__uint32_t)(table->ACPILevel.CcPwrDynRm) &
0xff000000) >> 24) : __swap32md(table->ACPILevel.CcPwrDynRm
)))
;
1238 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1)((table->ACPILevel.CcPwrDynRm1) = (__uint32_t)(__builtin_constant_p
(table->ACPILevel.CcPwrDynRm1) ? (__uint32_t)(((__uint32_t
)(table->ACPILevel.CcPwrDynRm1) & 0xff) << 24 | (
(__uint32_t)(table->ACPILevel.CcPwrDynRm1) & 0xff00) <<
8 | ((__uint32_t)(table->ACPILevel.CcPwrDynRm1) & 0xff0000
) >> 8 | ((__uint32_t)(table->ACPILevel.CcPwrDynRm1)
& 0xff000000) >> 24) : __swap32md(table->ACPILevel
.CcPwrDynRm1)))
;
1239
1240 /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
1241 table->MemoryACPILevel.MinVoltage =
1242 smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
1243
1244 /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
1245
1246 if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
1247 table->MemoryACPILevel.MinMvdd =
1248 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE)(__uint32_t)(__builtin_constant_p(voltage_level.Voltage * 4) ?
(__uint32_t)(((__uint32_t)(voltage_level.Voltage * 4) & 0xff
) << 24 | ((__uint32_t)(voltage_level.Voltage * 4) &
0xff00) << 8 | ((__uint32_t)(voltage_level.Voltage * 4
) & 0xff0000) >> 8 | ((__uint32_t)(voltage_level.Voltage
* 4) & 0xff000000) >> 24) : __swap32md(voltage_level
.Voltage * 4))
;
1249 else
1250 table->MemoryACPILevel.MinMvdd = 0;
1251
1252 /* Force reset on DLL*/
1253 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,(((mclk_pwrmgt_cntl) & ~0x10000) | (0x10000 & ((0x1) <<
0x10)))
1254 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1)(((mclk_pwrmgt_cntl) & ~0x10000) | (0x10000 & ((0x1) <<
0x10)))
;
1255 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,(((mclk_pwrmgt_cntl) & ~0x20000) | (0x20000 & ((0x1) <<
0x11)))
1256 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1)(((mclk_pwrmgt_cntl) & ~0x20000) | (0x20000 & ((0x1) <<
0x11)))
;
1257
1258 /* Disable DLL in ACPIState*/
1259 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,(((mclk_pwrmgt_cntl) & ~0x100) | (0x100 & ((0) <<
0x8)))
1260 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0)(((mclk_pwrmgt_cntl) & ~0x100) | (0x100 & ((0) <<
0x8)))
;
1261 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,(((mclk_pwrmgt_cntl) & ~0x200) | (0x200 & ((0) <<
0x9)))
1262 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0)(((mclk_pwrmgt_cntl) & ~0x200) | (0x200 & ((0) <<
0x9)))
;
1263
1264 /* Enable DLL bypass signal*/
1265 dll_cntl = PHM_SET_FIELD(dll_cntl,(((dll_cntl) & ~0x1000000) | (0x1000000 & ((0) <<
0x18)))
1266 DLL_CNTL, MRDCK0_BYPASS, 0)(((dll_cntl) & ~0x1000000) | (0x1000000 & ((0) <<
0x18)))
;
1267 dll_cntl = PHM_SET_FIELD(dll_cntl,(((dll_cntl) & ~0x2000000) | (0x2000000 & ((0) <<
0x19)))
1268 DLL_CNTL, MRDCK1_BYPASS, 0)(((dll_cntl) & ~0x2000000) | (0x2000000 & ((0) <<
0x19)))
;
1269
1270 table->MemoryACPILevel.DllCntl =
1271 PP_HOST_TO_SMC_UL(dll_cntl)(__uint32_t)(__builtin_constant_p(dll_cntl) ? (__uint32_t)(((
__uint32_t)(dll_cntl) & 0xff) << 24 | ((__uint32_t)
(dll_cntl) & 0xff00) << 8 | ((__uint32_t)(dll_cntl)
& 0xff0000) >> 8 | ((__uint32_t)(dll_cntl) & 0xff000000
) >> 24) : __swap32md(dll_cntl))
;
1272 table->MemoryACPILevel.MclkPwrmgtCntl =
1273 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl)(__uint32_t)(__builtin_constant_p(mclk_pwrmgt_cntl) ? (__uint32_t
)(((__uint32_t)(mclk_pwrmgt_cntl) & 0xff) << 24 | (
(__uint32_t)(mclk_pwrmgt_cntl) & 0xff00) << 8 | ((__uint32_t
)(mclk_pwrmgt_cntl) & 0xff0000) >> 8 | ((__uint32_t
)(mclk_pwrmgt_cntl) & 0xff000000) >> 24) : __swap32md
(mclk_pwrmgt_cntl))
;
1274 table->MemoryACPILevel.MpllAdFuncCntl =
1275 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL)(__uint32_t)(__builtin_constant_p(data->clock_registers.vMPLL_AD_FUNC_CNTL
) ? (__uint32_t)(((__uint32_t)(data->clock_registers.vMPLL_AD_FUNC_CNTL
) & 0xff) << 24 | ((__uint32_t)(data->clock_registers
.vMPLL_AD_FUNC_CNTL) & 0xff00) << 8 | ((__uint32_t)
(data->clock_registers.vMPLL_AD_FUNC_CNTL) & 0xff0000)
>> 8 | ((__uint32_t)(data->clock_registers.vMPLL_AD_FUNC_CNTL
) & 0xff000000) >> 24) : __swap32md(data->clock_registers
.vMPLL_AD_FUNC_CNTL))
;
1276 table->MemoryACPILevel.MpllDqFuncCntl =
1277 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL)(__uint32_t)(__builtin_constant_p(data->clock_registers.vMPLL_DQ_FUNC_CNTL
) ? (__uint32_t)(((__uint32_t)(data->clock_registers.vMPLL_DQ_FUNC_CNTL
) & 0xff) << 24 | ((__uint32_t)(data->clock_registers
.vMPLL_DQ_FUNC_CNTL) & 0xff00) << 8 | ((__uint32_t)
(data->clock_registers.vMPLL_DQ_FUNC_CNTL) & 0xff0000)
>> 8 | ((__uint32_t)(data->clock_registers.vMPLL_DQ_FUNC_CNTL
) & 0xff000000) >> 24) : __swap32md(data->clock_registers
.vMPLL_DQ_FUNC_CNTL))
;
1278 table->MemoryACPILevel.MpllFuncCntl =
1279 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL)(__uint32_t)(__builtin_constant_p(data->clock_registers.vMPLL_FUNC_CNTL
) ? (__uint32_t)(((__uint32_t)(data->clock_registers.vMPLL_FUNC_CNTL
) & 0xff) << 24 | ((__uint32_t)(data->clock_registers
.vMPLL_FUNC_CNTL) & 0xff00) << 8 | ((__uint32_t)(data
->clock_registers.vMPLL_FUNC_CNTL) & 0xff0000) >>
8 | ((__uint32_t)(data->clock_registers.vMPLL_FUNC_CNTL) &
0xff000000) >> 24) : __swap32md(data->clock_registers
.vMPLL_FUNC_CNTL))
;
1280 table->MemoryACPILevel.MpllFuncCntl_1 =
1281 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1)(__uint32_t)(__builtin_constant_p(data->clock_registers.vMPLL_FUNC_CNTL_1
) ? (__uint32_t)(((__uint32_t)(data->clock_registers.vMPLL_FUNC_CNTL_1
) & 0xff) << 24 | ((__uint32_t)(data->clock_registers
.vMPLL_FUNC_CNTL_1) & 0xff00) << 8 | ((__uint32_t)(
data->clock_registers.vMPLL_FUNC_CNTL_1) & 0xff0000) >>
8 | ((__uint32_t)(data->clock_registers.vMPLL_FUNC_CNTL_1
) & 0xff000000) >> 24) : __swap32md(data->clock_registers
.vMPLL_FUNC_CNTL_1))
;
1282 table->MemoryACPILevel.MpllFuncCntl_2 =
1283 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2)(__uint32_t)(__builtin_constant_p(data->clock_registers.vMPLL_FUNC_CNTL_2
) ? (__uint32_t)(((__uint32_t)(data->clock_registers.vMPLL_FUNC_CNTL_2
) & 0xff) << 24 | ((__uint32_t)(data->clock_registers
.vMPLL_FUNC_CNTL_2) & 0xff00) << 8 | ((__uint32_t)(
data->clock_registers.vMPLL_FUNC_CNTL_2) & 0xff0000) >>
8 | ((__uint32_t)(data->clock_registers.vMPLL_FUNC_CNTL_2
) & 0xff000000) >> 24) : __swap32md(data->clock_registers
.vMPLL_FUNC_CNTL_2))
;
1284 table->MemoryACPILevel.MpllSs1 =
1285 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1)(__uint32_t)(__builtin_constant_p(data->clock_registers.vMPLL_SS1
) ? (__uint32_t)(((__uint32_t)(data->clock_registers.vMPLL_SS1
) & 0xff) << 24 | ((__uint32_t)(data->clock_registers
.vMPLL_SS1) & 0xff00) << 8 | ((__uint32_t)(data->
clock_registers.vMPLL_SS1) & 0xff0000) >> 8 | ((__uint32_t
)(data->clock_registers.vMPLL_SS1) & 0xff000000) >>
24) : __swap32md(data->clock_registers.vMPLL_SS1))
;
1286 table->MemoryACPILevel.MpllSs2 =
1287 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2)(__uint32_t)(__builtin_constant_p(data->clock_registers.vMPLL_SS2
) ? (__uint32_t)(((__uint32_t)(data->clock_registers.vMPLL_SS2
) & 0xff) << 24 | ((__uint32_t)(data->clock_registers
.vMPLL_SS2) & 0xff00) << 8 | ((__uint32_t)(data->
clock_registers.vMPLL_SS2) & 0xff0000) >> 8 | ((__uint32_t
)(data->clock_registers.vMPLL_SS2) & 0xff000000) >>
24) : __swap32md(data->clock_registers.vMPLL_SS2))
;
1288
1289 table->MemoryACPILevel.EnabledForThrottle = 0;
1290 table->MemoryACPILevel.EnabledForActivity = 0;
1291 table->MemoryACPILevel.UpHyst = 0;
1292 table->MemoryACPILevel.DownHyst = 100;
1293 table->MemoryACPILevel.VoltageDownHyst = 0;
1294 /* Indicates maximum activity level for this performance level.*/
1295 table->MemoryACPILevel.ActivityLevel =
1296 PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity)(__uint16_t)(__builtin_constant_p(data->current_profile_setting
.mclk_activity) ? (__uint16_t)(((__uint16_t)(data->current_profile_setting
.mclk_activity) & 0xffU) << 8 | ((__uint16_t)(data->
current_profile_setting.mclk_activity) & 0xff00U) >>
8) : __swap16md(data->current_profile_setting.mclk_activity
))
;
1297
1298 table->MemoryACPILevel.StutterEnable = 0;
1299 table->MemoryACPILevel.StrobeEnable = 0;
1300 table->MemoryACPILevel.EdcReadEnable = 0;
1301 table->MemoryACPILevel.EdcWriteEnable = 0;
1302 table->MemoryACPILevel.RttEnable = 0;
1303
1304 return result;
1305}
1306
1307static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1308 SMU72_Discrete_DpmTable *table)
1309{
1310 int result = 0;
1311
1312 uint8_t count;
1313 pp_atomctrl_clock_dividers_vi dividers;
1314 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1315 struct phm_ppt_v1_information *pptable_info =
1316 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1317 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1318 pptable_info->mm_dep_table;
1319
1320 table->UvdLevelCount = (uint8_t) (mm_table->count);
1321 table->UvdBootLevel = 0;
1322
1323 for (count = 0; count < table->UvdLevelCount; count++) {
1324 table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
1325 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1326 table->UvdLevel[count].MinVoltage.Vddc =
1327 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1328 mm_table->entries[count].vddc);
1329 table->UvdLevel[count].MinVoltage.VddGfx =
1330 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID20x2) ?
1331 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1332 mm_table->entries[count].vddgfx) : 0;
1333 table->UvdLevel[count].MinVoltage.Vddci =
1334 phm_get_voltage_id(&data->vddci_voltage_table,
1335 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA200);
1336 table->UvdLevel[count].MinVoltage.Phases = 1;
1337
1338 /* retrieve divider value for VBIOS */
1339 result = atomctrl_get_dfs_pll_dividers_vi(
1340 hwmgr,
1341 table->UvdLevel[count].VclkFrequency,
1342 &dividers);
1343
1344 PP_ASSERT_WITH_CODE((!result),do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for Vclk clock"
); return result; } } while (0)
1345 "can not find divide id for Vclk clock",do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for Vclk clock"
); return result; } } while (0)
1346 return result)do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for Vclk clock"
); return result; } } while (0)
;
1347
1348 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1349
1350 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1351 table->UvdLevel[count].DclkFrequency, &dividers);
1352 PP_ASSERT_WITH_CODE((!result),do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for Dclk clock"
); return result; } } while (0)
1353 "can not find divide id for Dclk clock",do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for Dclk clock"
); return result; } } while (0)
1354 return result)do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for Dclk clock"
); return result; } } while (0)
;
1355
1356 table->UvdLevel[count].DclkDivider =
1357 (uint8_t)dividers.pll_post_divider;
1358
1359 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency)((table->UvdLevel[count].VclkFrequency) = (__uint32_t)(__builtin_constant_p
(table->UvdLevel[count].VclkFrequency) ? (__uint32_t)(((__uint32_t
)(table->UvdLevel[count].VclkFrequency) & 0xff) <<
24 | ((__uint32_t)(table->UvdLevel[count].VclkFrequency) &
0xff00) << 8 | ((__uint32_t)(table->UvdLevel[count]
.VclkFrequency) & 0xff0000) >> 8 | ((__uint32_t)(table
->UvdLevel[count].VclkFrequency) & 0xff000000) >>
24) : __swap32md(table->UvdLevel[count].VclkFrequency)))
;
1360 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency)((table->UvdLevel[count].DclkFrequency) = (__uint32_t)(__builtin_constant_p
(table->UvdLevel[count].DclkFrequency) ? (__uint32_t)(((__uint32_t
)(table->UvdLevel[count].DclkFrequency) & 0xff) <<
24 | ((__uint32_t)(table->UvdLevel[count].DclkFrequency) &
0xff00) << 8 | ((__uint32_t)(table->UvdLevel[count]
.DclkFrequency) & 0xff0000) >> 8 | ((__uint32_t)(table
->UvdLevel[count].DclkFrequency) & 0xff000000) >>
24) : __swap32md(table->UvdLevel[count].DclkFrequency)))
;
1361 }
1362
1363 return result;
1364
1365}
1366
1367static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1368 SMU72_Discrete_DpmTable *table)
1369{
1370 int result = 0;
1371
1372 uint8_t count;
1373 pp_atomctrl_clock_dividers_vi dividers;
1374 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1375 struct phm_ppt_v1_information *pptable_info =
1376 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1377 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1378 pptable_info->mm_dep_table;
1379
1380 table->VceLevelCount = (uint8_t) (mm_table->count);
1381 table->VceBootLevel = 0;
1382
1383 for (count = 0; count < table->VceLevelCount; count++) {
1384 table->VceLevel[count].Frequency =
1385 mm_table->entries[count].eclk;
1386 table->VceLevel[count].MinVoltage.Vddc =
1387 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1388 mm_table->entries[count].vddc);
1389 table->VceLevel[count].MinVoltage.VddGfx =
1390 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID20x2) ?
1391 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1392 mm_table->entries[count].vddgfx) : 0;
1393 table->VceLevel[count].MinVoltage.Vddci =
1394 phm_get_voltage_id(&data->vddci_voltage_table,
1395 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA200);
1396 table->VceLevel[count].MinVoltage.Phases = 1;
1397
1398 /* retrieve divider value for VBIOS */
1399 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1400 table->VceLevel[count].Frequency, &dividers);
1401 PP_ASSERT_WITH_CODE((!result),do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for VCE engine clock"
); return result; } } while (0)
1402 "can not find divide id for VCE engine clock",do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for VCE engine clock"
); return result; } } while (0)
1403 return result)do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for VCE engine clock"
); return result; } } while (0)
;
1404
1405 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1406
1407 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency)((table->VceLevel[count].Frequency) = (__uint32_t)(__builtin_constant_p
(table->VceLevel[count].Frequency) ? (__uint32_t)(((__uint32_t
)(table->VceLevel[count].Frequency) & 0xff) << 24
| ((__uint32_t)(table->VceLevel[count].Frequency) & 0xff00
) << 8 | ((__uint32_t)(table->VceLevel[count].Frequency
) & 0xff0000) >> 8 | ((__uint32_t)(table->VceLevel
[count].Frequency) & 0xff000000) >> 24) : __swap32md
(table->VceLevel[count].Frequency)))
;
1408 }
1409
1410 return result;
1411}
1412
1413static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1414 SMU72_Discrete_DpmTable *table)
1415{
1416 int result = 0;
1417 uint8_t count;
1418 pp_atomctrl_clock_dividers_vi dividers;
1419 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1420 struct phm_ppt_v1_information *pptable_info =
1421 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1422 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1423 pptable_info->mm_dep_table;
1424
1425 table->AcpLevelCount = (uint8_t) (mm_table->count);
1426 table->AcpBootLevel = 0;
1427
1428 for (count = 0; count < table->AcpLevelCount; count++) {
1429 table->AcpLevel[count].Frequency =
1430 pptable_info->mm_dep_table->entries[count].aclk;
1431 table->AcpLevel[count].MinVoltage.Vddc =
1432 phm_get_voltage_index(pptable_info->vddc_lookup_table,
1433 mm_table->entries[count].vddc);
1434 table->AcpLevel[count].MinVoltage.VddGfx =
1435 (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID20x2) ?
1436 phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
1437 mm_table->entries[count].vddgfx) : 0;
1438 table->AcpLevel[count].MinVoltage.Vddci =
1439 phm_get_voltage_id(&data->vddci_voltage_table,
1440 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA200);
1441 table->AcpLevel[count].MinVoltage.Phases = 1;
1442
1443 /* retrieve divider value for VBIOS */
1444 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1445 table->AcpLevel[count].Frequency, &dividers);
1446 PP_ASSERT_WITH_CODE((!result),do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for engine clock"
); return result; } } while (0)
1447 "can not find divide id for engine clock", return result)do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "can not find divide id for engine clock"
); return result; } } while (0)
;
1448
1449 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1450
1451 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency)((table->AcpLevel[count].Frequency) = (__uint32_t)(__builtin_constant_p
(table->AcpLevel[count].Frequency) ? (__uint32_t)(((__uint32_t
)(table->AcpLevel[count].Frequency) & 0xff) << 24
| ((__uint32_t)(table->AcpLevel[count].Frequency) & 0xff00
) << 8 | ((__uint32_t)(table->AcpLevel[count].Frequency
) & 0xff0000) >> 8 | ((__uint32_t)(table->AcpLevel
[count].Frequency) & 0xff000000) >> 24) : __swap32md
(table->AcpLevel[count].Frequency)))
;
1452 }
1453
1454 return result;
1455}
1456
1457static int tonga_populate_memory_timing_parameters(
1458 struct pp_hwmgr *hwmgr,
1459 uint32_t engine_clock,
1460 uint32_t memory_clock,
1461 struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
1462 )
1463{
1464 uint32_t dramTiming;
1465 uint32_t dramTiming2;
1466 uint32_t burstTime;
1467 int result;
1468
1469 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1470 engine_clock, memory_clock);
1471
1472 PP_ASSERT_WITH_CODE(result == 0,do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error calling VBIOS to set DRAM_TIMING."
); return result; } } while (0)
1473 "Error calling VBIOS to set DRAM_TIMING.", return result)do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error calling VBIOS to set DRAM_TIMING."
); return result; } } while (0)
;
1474
1475 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0x9dd))
;
1476 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0x9de))
;
1477 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0)((((((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0xa02))) & 0x1f) >> 0x0)
;
1478
1479 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming)(__uint32_t)(__builtin_constant_p(dramTiming) ? (__uint32_t)(
((__uint32_t)(dramTiming) & 0xff) << 24 | ((__uint32_t
)(dramTiming) & 0xff00) << 8 | ((__uint32_t)(dramTiming
) & 0xff0000) >> 8 | ((__uint32_t)(dramTiming) &
0xff000000) >> 24) : __swap32md(dramTiming))
;
1480 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2)(__uint32_t)(__builtin_constant_p(dramTiming2) ? (__uint32_t)
(((__uint32_t)(dramTiming2) & 0xff) << 24 | ((__uint32_t
)(dramTiming2) & 0xff00) << 8 | ((__uint32_t)(dramTiming2
) & 0xff0000) >> 8 | ((__uint32_t)(dramTiming2) &
0xff000000) >> 24) : __swap32md(dramTiming2))
;
1481 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1482
1483 return 0;
1484}
1485
1486static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1487{
1488 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1489 struct tonga_smumgr *smu_data =
1490 (struct tonga_smumgr *)(hwmgr->smu_backend);
1491 int result = 0;
1492 SMU72_Discrete_MCArbDramTimingTable arb_regs;
1493 uint32_t i, j;
1494
1495 memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable))__builtin_memset((&arb_regs), (0x00), (sizeof(SMU72_Discrete_MCArbDramTimingTable
)))
;
1496
1497 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1498 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1499 result = tonga_populate_memory_timing_parameters
1500 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1501 data->dpm_table.mclk_table.dpm_levels[j].value,
1502 &arb_regs.entries[i][j]);
1503
1504 if (result)
1505 break;
1506 }
1507 }
1508
1509 if (!result) {
1510 result = smu7_copy_bytes_to_smc(
1511 hwmgr,
1512 smu_data->smu7_data.arb_table_start,
1513 (uint8_t *)&arb_regs,
1514 sizeof(SMU72_Discrete_MCArbDramTimingTable),
1515 SMC_RAM_END0x40000
1516 );
1517 }
1518
1519 return result;
1520}
1521
1522static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1523 SMU72_Discrete_DpmTable *table)
1524{
1525 int result = 0;
1526 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1527 struct tonga_smumgr *smu_data =
1528 (struct tonga_smumgr *)(hwmgr->smu_backend);
1529 table->GraphicsBootLevel = 0;
1530 table->MemoryBootLevel = 0;
1531
1532 /* find boot level from dpm table*/
1533 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1534 data->vbios_boot_state.sclk_bootup_value,
1535 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1536
1537 if (result != 0) {
1538 smu_data->smc_state_table.GraphicsBootLevel = 0;
1539 pr_err("[powerplay] VBIOS did not find boot engine "printk("\0013" "amdgpu: " "[powerplay] VBIOS did not find boot engine "
"clock value in dependency table. " "Using Graphics DPM level 0 !"
)
1540 "clock value in dependency table. "printk("\0013" "amdgpu: " "[powerplay] VBIOS did not find boot engine "
"clock value in dependency table. " "Using Graphics DPM level 0 !"
)
1541 "Using Graphics DPM level 0 !")printk("\0013" "amdgpu: " "[powerplay] VBIOS did not find boot engine "
"clock value in dependency table. " "Using Graphics DPM level 0 !"
)
;
1542 result = 0;
Value stored to 'result' is never read
1543 }
1544
1545 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1546 data->vbios_boot_state.mclk_bootup_value,
1547 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1548
1549 if (result != 0) {
1550 smu_data->smc_state_table.MemoryBootLevel = 0;
1551 pr_err("[powerplay] VBIOS did not find boot "printk("\0013" "amdgpu: " "[powerplay] VBIOS did not find boot "
"engine clock value in dependency table." "Using Memory DPM level 0 !"
)
1552 "engine clock value in dependency table."printk("\0013" "amdgpu: " "[powerplay] VBIOS did not find boot "
"engine clock value in dependency table." "Using Memory DPM level 0 !"
)
1553 "Using Memory DPM level 0 !")printk("\0013" "amdgpu: " "[powerplay] VBIOS did not find boot "
"engine clock value in dependency table." "Using Memory DPM level 0 !"
)
;
1554 result = 0;
1555 }
1556
1557 table->BootVoltage.Vddc =
1558 phm_get_voltage_id(&(data->vddc_voltage_table),
1559 data->vbios_boot_state.vddc_bootup_value);
1560 table->BootVoltage.VddGfx =
1561 phm_get_voltage_id(&(data->vddgfx_voltage_table),
1562 data->vbios_boot_state.vddgfx_bootup_value);
1563 table->BootVoltage.Vddci =
1564 phm_get_voltage_id(&(data->vddci_voltage_table),
1565 data->vbios_boot_state.vddci_bootup_value);
1566 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1567
1568 CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd)((table->BootMVdd) = (__uint16_t)(__builtin_constant_p(table
->BootMVdd) ? (__uint16_t)(((__uint16_t)(table->BootMVdd
) & 0xffU) << 8 | ((__uint16_t)(table->BootMVdd)
& 0xff00U) >> 8) : __swap16md(table->BootMVdd))
)
;
1569
1570 return result;
1571}
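This function contains the dead store the analyzer flags: the 'result = 0;' at line 1542 is unconditionally overwritten by the second phm_find_boot_level() call at line 1545, so the stored value is never read. One possible cleanup, sketched below under the assumption that the fallback only needs to force Graphics DPM level 0 (this is not the upstream fix, just an illustration), is to drop the redundant assignment:

	/* sketch of lines 1533-1543 without the dead store */
	result = phm_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
	if (result != 0) {
		smu_data->smc_state_table.GraphicsBootLevel = 0;
		pr_err("[powerplay] VBIOS did not find boot engine "
			"clock value in dependency table. "
			"Using Graphics DPM level 0 !");
		/* no 'result = 0;' here: the mclk lookup at line 1545 overwrites it */
	}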
1572
1573static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
1574{
1575 uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
1576 volt_with_cks, value;
1577 uint16_t clock_freq_u16;
1578 struct tonga_smumgr *smu_data =
1579 (struct tonga_smumgr *)(hwmgr->smu_backend);
1580 uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
1581 volt_offset = 0;
1582 struct phm_ppt_v1_information *table_info =
1583 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1584 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1585 table_info->vdd_dep_on_sclk;
1586 uint32_t hw_revision, dev_id;
1587 struct amdgpu_device *adev = hwmgr->adev;
1588
1589 stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
1590
1591 hw_revision = adev->pdev->revision;
1592 dev_id = adev->pdev->device;
1593
1594 /* Read SMU_EFUSE to calculate RO and determine whether
1595  * the part is SS or FF. If RO >= 1660MHz, the part is FF.
1596 */
1597 efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (146 * 4)))
1598 ixSMU_EFUSE_0 + (146 * 4))(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (146 * 4)))
;
1599 efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (148 * 4)))
1600 ixSMU_EFUSE_0 + (148 * 4))(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (148 * 4)))
;
1601 efuse &= 0xFF000000;
1602 efuse = efuse >> 24;
1603 efuse2 &= 0xF;
1604
1605 if (efuse2 == 1)
1606 ro = (2300 - 1350) * efuse / 255 + 1350;
1607 else
1608 ro = (2500 - 1000) * efuse / 255 + 1000;
1609
1610 if (ro >= 1660)
1611 type = 0;
1612 else
1613 type = 1;
1614
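As a worked example of the fuse decode above (the fuse values are illustrative): efuse keeps only the top byte of the first fuse word, so it ranges 0..255; with efuse2 == 1 and efuse = 128, ro = (2300 - 1350) * 128 / 255 + 1350 = 476 + 1350 = 1826, which is >= 1660, so type = 0 (an FF part per the comment above).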
1615 /* Populate Stretch amount */
1616 smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
1617
1618
1619 /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
1620 for (i = 0; i < sclk_table->count; i++) {
1621 smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
1622 sclk_table->entries[i].cks_enable << i;
1623 if (ASICID_IS_TONGA_P(dev_id, hw_revision)(((dev_id == 0x6930) && ((hw_revision == 0xF0) || (hw_revision
== 0xF1) || (hw_revision == 0xFF))) || ((dev_id == 0x6920) &&
((hw_revision == 0) || (hw_revision == 1))))
) {
1624 volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
1625 (sclk_table->entries[i].clk/100) / 10000) * 1000 /
1626 (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
1627 volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
1628 (sclk_table->entries[i].clk/100) / 100000) * 1000 /
1629 (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
1630 } else {
1631 volt_without_cks = (uint32_t)((14041 *
1632 (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
1633 (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
1634 volt_with_cks = (uint32_t)((13946 *
1635 (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
1636 (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
1637 }
1638 if (volt_without_cks >= volt_with_cks)
1639 volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
1640 sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
1641 smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
1642 }
1643
1644 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0xc020034c))) & ~0x1) | (0x1 &
((0x0) << 0x0)))))
1645 STRETCH_ENABLE, 0x0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0xc020034c))) & ~0x1) | (0x1 &
((0x0) << 0x0)))))
;
1646 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 &
((0x1) << 0x1)))))
1647 masterReset, 0x1)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 &
((0x1) << 0x1)))))
;
1648 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0xc020034c))) & ~0x4) | (0x4 &
((0x1) << 0x2)))))
1649 staticEnable, 0x1)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0xc020034c))) & ~0x4) | (0x4 &
((0x1) << 0x2)))))
;
1650 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 &
((0x0) << 0x1)))))
1651 masterReset, 0x0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 &
((0x0) << 0x1)))))
;
1652
1653 /* Populate CKS Lookup Table */
1654 if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
1655 stretch_amount2 = 0;
1656 else if (stretch_amount == 3 || stretch_amount == 4)
1657 stretch_amount2 = 1;
1658 else {
1659 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1660 PHM_PlatformCaps_ClockStretcher);
1661 PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Stretch Amount in PPTable not supported"
); return -22; } } while (0)
1662 "Stretch Amount in PPTable not supported",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Stretch Amount in PPTable not supported"
); return -22; } } while (0)
1663 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Stretch Amount in PPTable not supported"
); return -22; } } while (0)
;
1664 }
1665
1666 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0200350))
1667 ixPWR_CKS_CNTL)(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0200350))
;
1668 value &= 0xFFC2FF87;
1669 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
1670 tonga_clock_stretcher_lookup_table[stretch_amount2][0];
1671 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
1672 tonga_clock_stretcher_lookup_table[stretch_amount2][1];
1673 clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table
. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount
- 1]. SclkFrequency) ? (__uint32_t)(((__uint32_t)(smu_data->
smc_state_table. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount
- 1]. SclkFrequency) & 0xff) << 24 | ((__uint32_t)
(smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table
.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff00) <<
8 | ((__uint32_t)(smu_data->smc_state_table. GraphicsLevel
[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency
) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table
. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount
- 1]. SclkFrequency) & 0xff000000) >> 24) : __swap32md
(smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table
.GraphicsDpmLevelCount - 1]. SclkFrequency))
1674 GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table
. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount
- 1]. SclkFrequency) ? (__uint32_t)(((__uint32_t)(smu_data->
smc_state_table. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount
- 1]. SclkFrequency) & 0xff) << 24 | ((__uint32_t)
(smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table
.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff00) <<
8 | ((__uint32_t)(smu_data->smc_state_table. GraphicsLevel
[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency
) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table
. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount
- 1]. SclkFrequency) & 0xff000000) >> 24) : __swap32md
(smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table
.GraphicsDpmLevelCount - 1]. SclkFrequency))
1675 SclkFrequency)(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table
. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount
- 1]. SclkFrequency) ? (__uint32_t)(((__uint32_t)(smu_data->
smc_state_table. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount
- 1]. SclkFrequency) & 0xff) << 24 | ((__uint32_t)
(smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table
.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff00) <<
8 | ((__uint32_t)(smu_data->smc_state_table. GraphicsLevel
[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency
) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table
. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount
- 1]. SclkFrequency) & 0xff000000) >> 24) : __swap32md
(smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table
.GraphicsDpmLevelCount - 1]. SclkFrequency))
/ 100);
1676 if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
1677 clock_freq_u16 &&
1678 tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
1679 clock_freq_u16) {
1680 /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
1681 value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
1682 /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
1683 value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
1684 /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
1685 value |= (tonga_clock_stretch_amount_conversion
1686 [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
1687 [stretch_amount]) << 3;
1688 }
1689 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry
[0].minFreq) = (__uint16_t)(__builtin_constant_p(smu_data->
smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].minFreq
) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable
. CKS_LOOKUPTableEntry[0].minFreq) & 0xffU) << 8 | (
(__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry
[0].minFreq) & 0xff00U) >> 8) : __swap16md(smu_data
->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0]
.minFreq)))
1690 CKS_LOOKUPTableEntry[0].minFreq)((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry
[0].minFreq) = (__uint16_t)(__builtin_constant_p(smu_data->
smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].minFreq
) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable
. CKS_LOOKUPTableEntry[0].minFreq) & 0xffU) << 8 | (
(__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry
[0].minFreq) & 0xff00U) >> 8) : __swap16md(smu_data
->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0]
.minFreq)))
;
1691 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry
[0].maxFreq) = (__uint16_t)(__builtin_constant_p(smu_data->
smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].maxFreq
) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable
. CKS_LOOKUPTableEntry[0].maxFreq) & 0xffU) << 8 | (
(__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry
[0].maxFreq) & 0xff00U) >> 8) : __swap16md(smu_data
->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0]
.maxFreq)))
1692 CKS_LOOKUPTableEntry[0].maxFreq)((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry
[0].maxFreq) = (__uint16_t)(__builtin_constant_p(smu_data->
smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].maxFreq
) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable
. CKS_LOOKUPTableEntry[0].maxFreq) & 0xffU) << 8 | (
(__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry
[0].maxFreq) & 0xff00U) >> 8) : __swap16md(smu_data
->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0]
.maxFreq)))
;
1693 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
1694 tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
1695 smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
1696 (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
1697
1698 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0200350,value))
1699 ixPWR_CKS_CNTL, value)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0200350,value))
;
1700
1701 /* Populate DDT Lookup Table */
1702 for (i = 0; i < 4; i++) {
1703 /* Assign the minimum and maximum VID stored
1704 * in the last row of Clock Stretcher Voltage Table.
1705 */
1706 smu_data->smc_state_table.ClockStretcherDataTable.
1707 ClockStretcherDataTableEntry[i].minVID =
1708 (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
1709 smu_data->smc_state_table.ClockStretcherDataTable.
1710 ClockStretcherDataTableEntry[i].maxVID =
1711 (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
1712 /* Loop through each SCLK and check the frequency
1713 * to see if it lies within the frequency range for the clock stretcher.
1714 */
1715 for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
1716 cks_setting = 0;
1717 clock_freq = PP_SMC_TO_HOST_UL((__uint32_t)(__builtin_constant_p(smu_data->smc_state_table
.GraphicsLevel[j].SclkFrequency) ? (__uint32_t)(((__uint32_t)
(smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency)
& 0xff) << 24 | ((__uint32_t)(smu_data->smc_state_table
.GraphicsLevel[j].SclkFrequency) & 0xff00) << 8 | (
(__uint32_t)(smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency
) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table
.GraphicsLevel[j].SclkFrequency) & 0xff000000) >> 24
) : __swap32md(smu_data->smc_state_table.GraphicsLevel[j].
SclkFrequency))
1718 smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency)(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table
.GraphicsLevel[j].SclkFrequency) ? (__uint32_t)(((__uint32_t)
(smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency)
& 0xff) << 24 | ((__uint32_t)(smu_data->smc_state_table
.GraphicsLevel[j].SclkFrequency) & 0xff00) << 8 | (
(__uint32_t)(smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency
) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table
.GraphicsLevel[j].SclkFrequency) & 0xff000000) >> 24
) : __swap32md(smu_data->smc_state_table.GraphicsLevel[j].
SclkFrequency))
;
1719 /* Check the allowed frequency against the sclk level[j].
1720 * Sclk's endianness has already been converted,
1721 * and it's in 10 kHz units,
1722 * as opposed to the Data table, which is in MHz units.
1723 */
1724 if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
1725 cks_setting |= 0x2;
1726 if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
1727 cks_setting |= 0x1;
1728 }
1729 smu_data->smc_state_table.ClockStretcherDataTable.
1730 ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
1731 }
1732 CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.((smu_data->smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry
[i].setting) = (__uint16_t)(__builtin_constant_p(smu_data->
smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry
[i].setting) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table
. ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting
) & 0xffU) << 8 | ((__uint16_t)(smu_data->smc_state_table
. ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting
) & 0xff00U) >> 8) : __swap16md(smu_data->smc_state_table
. ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting
)))
1733 ClockStretcherDataTable.((smu_data->smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry
[i].setting) = (__uint16_t)(__builtin_constant_p(smu_data->
smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry
[i].setting) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table
. ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting
) & 0xffU) << 8 | ((__uint16_t)(smu_data->smc_state_table
. ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting
) & 0xff00U) >> 8) : __swap16md(smu_data->smc_state_table
. ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting
)))
1734 ClockStretcherDataTableEntry[i].setting)((smu_data->smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry
[i].setting) = (__uint16_t)(__builtin_constant_p(smu_data->
smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry
[i].setting) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table
. ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting
) & 0xffU) << 8 | ((__uint16_t)(smu_data->smc_state_table
. ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting
) & 0xff00U) >> 8) : __swap16md(smu_data->smc_state_table
. ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting
)))
;
1735 }
1736
1737 value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0200350))
1738 ixPWR_CKS_CNTL)(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0200350))
;
1739 value &= 0xFFFFFFFE;
1740 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0200350,value))
1741 ixPWR_CKS_CNTL, value)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0200350,value))
;
1742
1743 return 0;
1744}
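
The PHM_WRITE_INDIRECT_FIELD expansions shown above all reduce to the same read-modify-write shape: read the indirect register, clear the field's mask, OR in the new value shifted to the field's bit position, and write the result back. The stand-alone sketch below reproduces only that pattern; the fake register variable and helper names are invented for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the SMC indirect register space (illustrative only). */
static uint32_t fake_reg;

static uint32_t read_ind_register(uint32_t offset) { (void)offset; return fake_reg; }
static void write_ind_register(uint32_t offset, uint32_t v) { (void)offset; fake_reg = v; }

/* Generic field update: clear the field's mask, then OR in the new value
 * shifted into position -- the same arithmetic as the expansions above. */
static void write_field(uint32_t offset, uint32_t mask, uint32_t shift, uint32_t value)
{
	uint32_t reg = read_ind_register(offset);

	reg = (reg & ~mask) | (mask & (value << shift));
	write_ind_register(offset, reg);
}

int main(void)
{
	fake_reg = 0xffffffff;
	write_field(0xc020034c, 0x1, 0x0, 0x0);	/* STRETCH_ENABLE = 0 */
	write_field(0xc020034c, 0x2, 0x1, 0x1);	/* masterReset    = 1 */
	printf("PWR_CKS_ENABLE now 0x%08x\n", (unsigned)fake_reg);
	return 0;
}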
1745
1746static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
1747 SMU72_Discrete_DpmTable *table)
1748{
1749 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1750 uint16_t config;
1751
1752 if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->vdd_gfx_control) {
1753 /* Splitted mode */
1754 config = VR_SVI2_PLANE_11;
1755 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT8);
1756
1757 if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->voltage_control) {
1758 config = VR_SVI2_PLANE_22;
1759 table->VRConfig |= config;
1760 } else {
1761 pr_err("VDDC and VDDGFX should "printk("\0013" "amdgpu: " "VDDC and VDDGFX should " "be both on SVI2 control in splitted mode !\n"
)
1762 "be both on SVI2 control in splitted mode !\n")printk("\0013" "amdgpu: " "VDDC and VDDGFX should " "be both on SVI2 control in splitted mode !\n"
)
;
1763 }
1764 } else {
1765 /* Merged mode */
1766 config = VR_MERGED_WITH_VDDC0;
1767 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT8);
1768
1769 /* Set Vddc Voltage Controller */
1770 if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->voltage_control) {
1771 config = VR_SVI2_PLANE_11;
1772 table->VRConfig |= config;
1773 } else {
1774 pr_err("VDDC should be on "printk("\0013" "amdgpu: " "VDDC should be on " "SVI2 control in merged mode !\n"
)
1775 "SVI2 control in merged mode !\n")printk("\0013" "amdgpu: " "VDDC should be on " "SVI2 control in merged mode !\n"
)
;
1776 }
1777 }
1778
1779 /* Set Vddci Voltage Controller */
1780 if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->vddci_control) {
1781 config = VR_SVI2_PLANE_22; /* only in merged mode */
1782 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT16);
1783 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO0x1 == data->vddci_control) {
1784 config = VR_SMIO_PATTERN_13;
1785 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT16);
1786 }
1787
1788 /* Set Mvdd Voltage Controller */
1789 if (SMU7_VOLTAGE_CONTROL_BY_GPIO0x1 == data->mvdd_control) {
1790 config = VR_SMIO_PATTERN_24;
1791 table->VRConfig |= (config<<VRCONF_MVDD_SHIFT24);
1792 }
1793
1794 return 0;
1795}
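
tonga_populate_vr_config() packs one voltage-controller selector per rail into the 32-bit VRConfig word: VDDGFX at bit 8, VDDCI at bit 16, MVDD at bit 24, with VDDC in the low bits. The sketch below shows the packing for the merged-mode path, using the numeric encodings that appear as constant suffixes in the listing (VR_MERGED_WITH_VDDC = 0, VR_SVI2_PLANE_1 = 1, VR_SVI2_PLANE_2 = 2, VR_SMIO_PATTERN_2 = 4); everything else is illustrative only.

#include <stdint.h>
#include <stdio.h>

#define VRCONF_VDDGFX_SHIFT	8
#define VRCONF_VDDCI_SHIFT	16
#define VRCONF_MVDD_SHIFT	24

int main(void)
{
	uint32_t vr_config = 0;

	vr_config |= (uint32_t)0 << VRCONF_VDDGFX_SHIFT;	/* VR_MERGED_WITH_VDDC */
	vr_config |= 1;						/* VDDC on VR_SVI2_PLANE_1 */
	vr_config |= (uint32_t)2 << VRCONF_VDDCI_SHIFT;		/* VDDCI on VR_SVI2_PLANE_2 */
	vr_config |= (uint32_t)4 << VRCONF_MVDD_SHIFT;		/* MVDD on VR_SMIO_PATTERN_2 */

	printf("VRConfig = 0x%08x\n", (unsigned)vr_config);	/* 0x04020001 */
	return 0;
}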
1796
1797static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
1798{
1799 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
1800 uint32_t tmp;
1801 int result;
1802
1803 /*
1804 * This is a read-modify-write on the first byte of the ARB table.
1805 * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
1806 * is the field 'current'.
1807 * This solution is ugly, but we never write the whole table, only
1808 * individual fields in it.
1809 * In reality this field should not be in that structure
1810 * but in a soft register.
1811 */
1812 result = smu7_read_smc_sram_dword(hwmgr,
1813 smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END0x40000);
1814
1815 if (result != 0)
1816 return result;
1817
1818 tmp &= 0x00FFFFFF;
1819 tmp |= ((uint32_t)MC_CG_ARB_FREQ_F10x0b) << 24;
1820
1821 return smu7_write_smc_sram_dword(hwmgr,
1822 smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END0x40000);
1823}
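
tonga_init_arb_table_index() performs the read-modify-write the comment describes: the dword at arb_table_start is read back, its low 24 bits are preserved, and the top byte is replaced with MC_CG_ARB_FREQ_F1 (0x0b in this listing). A minimal sketch of just that arithmetic, with a made-up starting value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend this dword was read back from SMC SRAM at arb_table_start. */
	uint32_t tmp = 0xdeadbeef;

	tmp &= 0x00FFFFFF;		/* keep the low three bytes           */
	tmp |= (uint32_t)0x0b << 24;	/* MC_CG_ARB_FREQ_F1 into bits 24..31 */

	printf("dword written back: 0x%08x\n", (unsigned)tmp);	/* 0x0badbeef */
	return 0;
}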
1824
1825
1826static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
1827{
1828 struct tonga_smumgr *smu_data =
1829 (struct tonga_smumgr *)(hwmgr->smu_backend);
1830 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1831 SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
1832 struct phm_ppt_v1_information *table_info =
1833 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1834 struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
1835 int i, j, k;
1836 const uint16_t *pdef1, *pdef2;
1837
1838 dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((__uint16_t)(__builtin_constant_p((uint16_t)(cac_dtp_table->
usTDP * 256)) ? (__uint16_t)(((__uint16_t)((uint16_t)(cac_dtp_table
->usTDP * 256)) & 0xffU) << 8 | ((__uint16_t)((uint16_t
)(cac_dtp_table->usTDP * 256)) & 0xff00U) >> 8) :
__swap16md((uint16_t)(cac_dtp_table->usTDP * 256)))
1839 (uint16_t)(cac_dtp_table->usTDP * 256))(__uint16_t)(__builtin_constant_p((uint16_t)(cac_dtp_table->
usTDP * 256)) ? (__uint16_t)(((__uint16_t)((uint16_t)(cac_dtp_table
->usTDP * 256)) & 0xffU) << 8 | ((__uint16_t)((uint16_t
)(cac_dtp_table->usTDP * 256)) & 0xff00U) >> 8) :
__swap16md((uint16_t)(cac_dtp_table->usTDP * 256)))
;
1840 dpm_table->TargetTdp = PP_HOST_TO_SMC_US((__uint16_t)(__builtin_constant_p((uint16_t)(cac_dtp_table->
usConfigurableTDP * 256)) ? (__uint16_t)(((__uint16_t)((uint16_t
)(cac_dtp_table->usConfigurableTDP * 256)) & 0xffU) <<
8 | ((__uint16_t)((uint16_t)(cac_dtp_table->usConfigurableTDP
* 256)) & 0xff00U) >> 8) : __swap16md((uint16_t)(cac_dtp_table
->usConfigurableTDP * 256)))
1841 (uint16_t)(cac_dtp_table->usConfigurableTDP * 256))(__uint16_t)(__builtin_constant_p((uint16_t)(cac_dtp_table->
usConfigurableTDP * 256)) ? (__uint16_t)(((__uint16_t)((uint16_t
)(cac_dtp_table->usConfigurableTDP * 256)) & 0xffU) <<
8 | ((__uint16_t)((uint16_t)(cac_dtp_table->usConfigurableTDP
* 256)) & 0xff00U) >> 8) : __swap16md((uint16_t)(cac_dtp_table
->usConfigurableTDP * 256)))
;
1842
1843 PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,do { if (!(cac_dtp_table->usTargetOperatingTemp <= 255)
) { printk("\0014" "amdgpu: " "%s\n", "Target Operating Temp is out of Range !"
); ; } } while (0)
1844 "Target Operating Temp is out of Range !",do { if (!(cac_dtp_table->usTargetOperatingTemp <= 255)
) { printk("\0014" "amdgpu: " "%s\n", "Target Operating Temp is out of Range !"
); ; } } while (0)
1845 )do { if (!(cac_dtp_table->usTargetOperatingTemp <= 255)
) { printk("\0014" "amdgpu: " "%s\n", "Target Operating Temp is out of Range !"
); ; } } while (0)
;
1846
1847 dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
1848 dpm_table->GpuTjHyst = 8;
1849
1850 dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
1851
1852 dpm_table->BAPM_TEMP_GRADIENT =
1853 PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient)(__uint32_t)(__builtin_constant_p(defaults->bapm_temp_gradient
) ? (__uint32_t)(((__uint32_t)(defaults->bapm_temp_gradient
) & 0xff) << 24 | ((__uint32_t)(defaults->bapm_temp_gradient
) & 0xff00) << 8 | ((__uint32_t)(defaults->bapm_temp_gradient
) & 0xff0000) >> 8 | ((__uint32_t)(defaults->bapm_temp_gradient
) & 0xff000000) >> 24) : __swap32md(defaults->bapm_temp_gradient
))
;
1854 pdef1 = defaults->bapmti_r;
1855 pdef2 = defaults->bapmti_rc;
1856
1857 for (i = 0; i < SMU72_DTE_ITERATIONS5; i++) {
1858 for (j = 0; j < SMU72_DTE_SOURCES3; j++) {
1859 for (k = 0; k < SMU72_DTE_SINKS1; k++) {
1860 dpm_table->BAPMTI_R[i][j][k] =
1861 PP_HOST_TO_SMC_US(*pdef1)(__uint16_t)(__builtin_constant_p(*pdef1) ? (__uint16_t)(((__uint16_t
)(*pdef1) & 0xffU) << 8 | ((__uint16_t)(*pdef1) &
0xff00U) >> 8) : __swap16md(*pdef1))
;
1862 dpm_table->BAPMTI_RC[i][j][k] =
1863 PP_HOST_TO_SMC_US(*pdef2)(__uint16_t)(__builtin_constant_p(*pdef2) ? (__uint16_t)(((__uint16_t
)(*pdef2) & 0xffU) << 8 | ((__uint16_t)(*pdef2) &
0xff00U) >> 8) : __swap16md(*pdef2))
;
1864 pdef1++;
1865 pdef2++;
1866 }
1867 }
1868 }
1869
1870 return 0;
1871}
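
Most of the PP_HOST_TO_SMC_US / CONVERT_FROM_HOST_TO_SMC_US expansions in this file boil down to a 16-bit byte swap (the _UL variants do the equivalent four-byte swap), so values written by the little-endian host land in the byte order the SMC firmware consumes; the CONVERT_ form additionally stores the swapped value back into its argument. A self-contained sketch of the 16-bit case, where the usTDP * 256 scaling is assumed to be an 8.8 fixed-point encoding:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic the PP_HOST_TO_SMC_US expansions reduce to. */
static uint16_t host_to_smc_us(uint16_t v)
{
	return (uint16_t)(((v & 0xffU) << 8) | ((v & 0xff00U) >> 8));
}

int main(void)
{
	uint16_t tdp = 90 * 256;	/* e.g. a TDP of 90, scaled as assumed above */

	printf("host 0x%04x -> smc 0x%04x\n", tdp, host_to_smc_us(tdp));
	return 0;
}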
1872
1873static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
1874{
1875 struct tonga_smumgr *smu_data =
1876 (struct tonga_smumgr *)(hwmgr->smu_backend);
1877 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1878
1879 smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
1880 smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
1881 smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
1882 smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
1883
1884 return 0;
1885}
1886
1887static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
1888{
1889 uint16_t tdc_limit;
1890 struct tonga_smumgr *smu_data =
1891 (struct tonga_smumgr *)(hwmgr->smu_backend);
1892 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1893 struct phm_ppt_v1_information *table_info =
1894 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1895
1896	/* The TDC number of fraction bits is changed from 8 to 7
1897	 * for Fiji, as requested by the SMC team
1898 */
1899 tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
1900 smu_data->power_tune_table.TDC_VDDC_PkgLimit =
1901 CONVERT_FROM_HOST_TO_SMC_US(tdc_limit)((tdc_limit) = (__uint16_t)(__builtin_constant_p(tdc_limit) ?
(__uint16_t)(((__uint16_t)(tdc_limit) & 0xffU) << 8
| ((__uint16_t)(tdc_limit) & 0xff00U) >> 8) : __swap16md
(tdc_limit)))
;
1902 smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
1903 defaults->tdc_vddc_throttle_release_limit_perc;
1904 smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
1905
1906 return 0;
1907}
1908
1909static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
1910{
1911 struct tonga_smumgr *smu_data =
1912 (struct tonga_smumgr *)(hwmgr->smu_backend);
1913 const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
1914 uint32_t temp;
1915
1916 if (smu7_read_smc_sram_dword(hwmgr,
1917 fuse_table_offset +
1918 offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl)__builtin_offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
1919 (uint32_t *)&temp, SMC_RAM_END0x40000))
1920 PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to read PmFuses.DW6 "
"(SviLoadLineEn) from SMC Failed !"); return -22; } } while (
0)
1921 "Attempt to read PmFuses.DW6 "do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to read PmFuses.DW6 "
"(SviLoadLineEn) from SMC Failed !"); return -22; } } while (
0)
1922 "(SviLoadLineEn) from SMC Failed !",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to read PmFuses.DW6 "
"(SviLoadLineEn) from SMC Failed !"); return -22; } } while (
0)
1923 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to read PmFuses.DW6 "
"(SviLoadLineEn) from SMC Failed !"); return -22; } } while (
0)
;
1924 else
1925 smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
1926
1927 return 0;
1928}
1929
1930static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
1931{
1932 int i;
1933 struct tonga_smumgr *smu_data =
1934 (struct tonga_smumgr *)(hwmgr->smu_backend);
1935
1936 /* Currently not used. Set all to zero. */
1937 for (i = 0; i < 16; i++)
1938 smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
1939
1940 return 0;
1941}
1942
1943static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
1944{
1945 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
1946
1947 if ((hwmgr->thermal_controller.advanceFanControlParameters.
1948 usFanOutputSensitivity & (1 << 15)) ||
1949 (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
1950 hwmgr->thermal_controller.advanceFanControlParameters.
1951 usFanOutputSensitivity = hwmgr->thermal_controller.
1952 advanceFanControlParameters.usDefaultFanOutputSensitivity;
1953
1954 smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
1955 PP_HOST_TO_SMC_US(hwmgr->thermal_controller.(__uint16_t)(__builtin_constant_p(hwmgr->thermal_controller
. advanceFanControlParameters.usFanOutputSensitivity) ? (__uint16_t
)(((__uint16_t)(hwmgr->thermal_controller. advanceFanControlParameters
.usFanOutputSensitivity) & 0xffU) << 8 | ((__uint16_t
)(hwmgr->thermal_controller. advanceFanControlParameters.usFanOutputSensitivity
) & 0xff00U) >> 8) : __swap16md(hwmgr->thermal_controller
. advanceFanControlParameters.usFanOutputSensitivity))
1956 advanceFanControlParameters.usFanOutputSensitivity)(__uint16_t)(__builtin_constant_p(hwmgr->thermal_controller
. advanceFanControlParameters.usFanOutputSensitivity) ? (__uint16_t
)(((__uint16_t)(hwmgr->thermal_controller. advanceFanControlParameters
.usFanOutputSensitivity) & 0xffU) << 8 | ((__uint16_t
)(hwmgr->thermal_controller. advanceFanControlParameters.usFanOutputSensitivity
) & 0xff00U) >> 8) : __swap16md(hwmgr->thermal_controller
. advanceFanControlParameters.usFanOutputSensitivity))
;
1957 return 0;
1958}
1959
1960static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
1961{
1962 int i;
1963 struct tonga_smumgr *smu_data =
1964 (struct tonga_smumgr *)(hwmgr->smu_backend);
1965
1966 /* Currently not used. Set all to zero. */
1967 for (i = 0; i < 16; i++)
1968 smu_data->power_tune_table.GnbLPML[i] = 0;
1969
1970 return 0;
1971}
1972
1973static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
1974{
1975 struct tonga_smumgr *smu_data =
1976 (struct tonga_smumgr *)(hwmgr->smu_backend);
1977 struct phm_ppt_v1_information *table_info =
1978 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1979 uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
1980 uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
1981 struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
1982
1983 hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
1984 lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
1985
1986 smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
1987 CONVERT_FROM_HOST_TO_SMC_US(hi_sidd)((hi_sidd) = (__uint16_t)(__builtin_constant_p(hi_sidd) ? (__uint16_t
)(((__uint16_t)(hi_sidd) & 0xffU) << 8 | ((__uint16_t
)(hi_sidd) & 0xff00U) >> 8) : __swap16md(hi_sidd)))
;
1988 smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
1989 CONVERT_FROM_HOST_TO_SMC_US(lo_sidd)((lo_sidd) = (__uint16_t)(__builtin_constant_p(lo_sidd) ? (__uint16_t
)(((__uint16_t)(lo_sidd) & 0xffU) << 8 | ((__uint16_t
)(lo_sidd) & 0xff00U) >> 8) : __swap16md(lo_sidd)))
;
1990
1991 return 0;
1992}
1993
1994static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
1995{
1996 struct tonga_smumgr *smu_data =
1997 (struct tonga_smumgr *)(hwmgr->smu_backend);
1998 uint32_t pm_fuse_table_offset;
1999
2000 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2001 PHM_PlatformCaps_PowerContainment)) {
2002 if (smu7_read_smc_sram_dword(hwmgr,
2003 SMU72_FIRMWARE_HEADER_LOCATION0x20000 +
2004 offsetof(SMU72_Firmware_Header, PmFuseTable)__builtin_offsetof(SMU72_Firmware_Header, PmFuseTable),
2005 &pm_fuse_table_offset, SMC_RAM_END0x40000))
2006 PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to get pm_fuse_table_offset Failed !"
); return -22; } } while (0)
2007 "Attempt to get pm_fuse_table_offset Failed !",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to get pm_fuse_table_offset Failed !"
); return -22; } } while (0)
2008 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to get pm_fuse_table_offset Failed !"
); return -22; } } while (0)
;
2009
2010 /* DW6 */
2011 if (tonga_populate_svi_load_line(hwmgr))
2012 PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate SviLoadLine Failed !"
); return -22; } } while (0)
2013 "Attempt to populate SviLoadLine Failed !",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate SviLoadLine Failed !"
); return -22; } } while (0)
2014 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate SviLoadLine Failed !"
); return -22; } } while (0)
;
2015 /* DW7 */
2016 if (tonga_populate_tdc_limit(hwmgr))
2017 PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TDCLimit Failed !"
); return -22; } } while (0)
2018 "Attempt to populate TDCLimit Failed !",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TDCLimit Failed !"
); return -22; } } while (0)
2019 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TDCLimit Failed !"
); return -22; } } while (0)
;
2020 /* DW8 */
2021 if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
2022 PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TdcWaterfallCtl Failed !"
); return -22; } } while (0)
2023 "Attempt to populate TdcWaterfallCtl Failed !",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TdcWaterfallCtl Failed !"
); return -22; } } while (0)
2024 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TdcWaterfallCtl Failed !"
); return -22; } } while (0)
;
2025
2026 /* DW9-DW12 */
2027 if (tonga_populate_temperature_scaler(hwmgr) != 0)
2028 PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate LPMLTemperatureScaler Failed !"
); return -22; } } while (0)
2029 "Attempt to populate LPMLTemperatureScaler Failed !",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate LPMLTemperatureScaler Failed !"
); return -22; } } while (0)
2030 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate LPMLTemperatureScaler Failed !"
); return -22; } } while (0)
;
2031
2032 /* DW13-DW14 */
2033 if (tonga_populate_fuzzy_fan(hwmgr))
2034 PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate Fuzzy Fan "
"Control parameters Failed !"); return -22; } } while (0)
2035 "Attempt to populate Fuzzy Fan "do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate Fuzzy Fan "
"Control parameters Failed !"); return -22; } } while (0)
2036 "Control parameters Failed !",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate Fuzzy Fan "
"Control parameters Failed !"); return -22; } } while (0)
2037 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate Fuzzy Fan "
"Control parameters Failed !"); return -22; } } while (0)
;
2038
2039 /* DW15-DW18 */
2040 if (tonga_populate_gnb_lpml(hwmgr))
2041 PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate GnbLPML Failed !"
); return -22; } } while (0)
2042 "Attempt to populate GnbLPML Failed !",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate GnbLPML Failed !"
); return -22; } } while (0)
2043 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate GnbLPML Failed !"
); return -22; } } while (0)
;
2044
2045 /* DW20 */
2046 if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
2047 PP_ASSERT_WITH_CODE(do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate BapmVddCBaseLeakage "
"Hi and Lo Sidd Failed !"); return -22; } } while (0)
2048 false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate BapmVddCBaseLeakage "
"Hi and Lo Sidd Failed !"); return -22; } } while (0)
2049 "Attempt to populate BapmVddCBaseLeakage "do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate BapmVddCBaseLeakage "
"Hi and Lo Sidd Failed !"); return -22; } } while (0)
2050 "Hi and Lo Sidd Failed !",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate BapmVddCBaseLeakage "
"Hi and Lo Sidd Failed !"); return -22; } } while (0)
2051 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate BapmVddCBaseLeakage "
"Hi and Lo Sidd Failed !"); return -22; } } while (0)
;
2052
2053 if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
2054 (uint8_t *)&smu_data->power_tune_table,
2055 sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END0x40000))
2056 PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to download PmFuseTable Failed !"
); return -22; } } while (0)
2057 "Attempt to download PmFuseTable Failed !",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to download PmFuseTable Failed !"
); return -22; } } while (0)
2058 return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to download PmFuseTable Failed !"
); return -22; } } while (0)
;
2059 }
2060 return 0;
2061}
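
The PP_ASSERT_WITH_CODE expansions that dominate this function follow one pattern: if the condition is false, log the message with an "amdgpu: " prefix and then execute the third argument (usually a return statement) in the caller's scope; passing false as the condition, as above, makes the branch unconditional. The macro below is a stand-alone approximation of that behaviour for illustration, not the driver's definition (which uses printk and kernel log levels).

#include <stdio.h>

#define PP_ASSERT_WITH_CODE(cond, msg, code)			\
	do {							\
		if (!(cond)) {					\
			fprintf(stderr, "amdgpu: %s\n", (msg));	\
			code;					\
		}						\
	} while (0)

static int populate_something(int ok)
{
	PP_ASSERT_WITH_CODE(ok,
			"Attempt to populate something Failed !",
			return -22);
	return 0;
}

int main(void)
{
	printf("ok  -> %d\n", populate_something(1));	/* 0   */
	printf("bad -> %d\n", populate_something(0));	/* -22 */
	return 0;
}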
2062
2063static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
2064 SMU72_Discrete_MCRegisters *mc_reg_table)
2065{
2066 const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend;
2067
2068 uint32_t i, j;
2069
2070 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
2071 if (smu_data->mc_reg_table.validflag & 1<<j) {
2072 PP_ASSERT_WITH_CODE(do { if (!(i < 16)) { printk("\0014" "amdgpu: " "%s\n", "Index of mc_reg_table->address[] array "
"out of boundary"); return -22; } } while (0)
2073 i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,do { if (!(i < 16)) { printk("\0014" "amdgpu: " "%s\n", "Index of mc_reg_table->address[] array "
"out of boundary"); return -22; } } while (0)
2074 "Index of mc_reg_table->address[] array "do { if (!(i < 16)) { printk("\0014" "amdgpu: " "%s\n", "Index of mc_reg_table->address[] array "
"out of boundary"); return -22; } } while (0)
2075 "out of boundary",do { if (!(i < 16)) { printk("\0014" "amdgpu: " "%s\n", "Index of mc_reg_table->address[] array "
"out of boundary"); return -22; } } while (0)
2076 return -EINVAL)do { if (!(i < 16)) { printk("\0014" "amdgpu: " "%s\n", "Index of mc_reg_table->address[] array "
"out of boundary"); return -22; } } while (0)
;
2077 mc_reg_table->address[i].s0 =
2078 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0)(__uint16_t)(__builtin_constant_p(smu_data->mc_reg_table.mc_reg_address
[j].s0) ? (__uint16_t)(((__uint16_t)(smu_data->mc_reg_table
.mc_reg_address[j].s0) & 0xffU) << 8 | ((__uint16_t
)(smu_data->mc_reg_table.mc_reg_address[j].s0) & 0xff00U
) >> 8) : __swap16md(smu_data->mc_reg_table.mc_reg_address
[j].s0))
;
2079 mc_reg_table->address[i].s1 =
2080 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1)(__uint16_t)(__builtin_constant_p(smu_data->mc_reg_table.mc_reg_address
[j].s1) ? (__uint16_t)(((__uint16_t)(smu_data->mc_reg_table
.mc_reg_address[j].s1) & 0xffU) << 8 | ((__uint16_t
)(smu_data->mc_reg_table.mc_reg_address[j].s1) & 0xff00U
) >> 8) : __swap16md(smu_data->mc_reg_table.mc_reg_address
[j].s1))
;
2081 i++;
2082 }
2083 }
2084
2085 mc_reg_table->last = (uint8_t)i;
2086
2087 return 0;
2088}
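
tonga_populate_mc_reg_address() (and tonga_convert_mc_registers() just below) use the same compaction idiom: iterate over every source slot, but copy only the slots whose bit is set in a validity mask, packing the survivors densely at the front of the destination array. A small illustration with made-up data and sizes:

#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES 16

int main(void)
{
	uint32_t src[MAX_ENTRIES];
	uint32_t dst[MAX_ENTRIES];
	uint32_t validflag = 0x2D;	/* bits 0, 2, 3 and 5 are valid */
	uint32_t i = 0, j;

	for (j = 0; j < MAX_ENTRIES; j++)
		src[j] = 0x1000 + j;

	for (j = 0; j < MAX_ENTRIES; j++) {
		if (validflag & (1u << j)) {
			dst[i] = src[j];
			i++;
		}
	}

	printf("kept %u of %d entries, dst[0] = 0x%x\n",
	    (unsigned)i, MAX_ENTRIES, (unsigned)dst[0]);
	return 0;
}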
2089
2090/*convert register values from driver to SMC format */
2091static void tonga_convert_mc_registers(
2092 const struct tonga_mc_reg_entry *entry,
2093 SMU72_Discrete_MCRegisterSet *data,
2094 uint32_t num_entries, uint32_t valid_flag)
2095{
2096 uint32_t i, j;
2097
2098 for (i = 0, j = 0; j < num_entries; j++) {
2099 if (valid_flag & 1<<j) {
2100 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j])(__uint32_t)(__builtin_constant_p(entry->mc_data[j]) ? (__uint32_t
)(((__uint32_t)(entry->mc_data[j]) & 0xff) << 24
| ((__uint32_t)(entry->mc_data[j]) & 0xff00) <<
8 | ((__uint32_t)(entry->mc_data[j]) & 0xff0000) >>
8 | ((__uint32_t)(entry->mc_data[j]) & 0xff000000) >>
24) : __swap32md(entry->mc_data[j]))
;
2101 i++;
2102 }
2103 }
2104}
2105
2106static int tonga_convert_mc_reg_table_entry_to_smc(
2107 struct pp_hwmgr *hwmgr,
2108 const uint32_t memory_clock,
2109 SMU72_Discrete_MCRegisterSet *mc_reg_table_data
2110 )
2111{
2112 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2113 uint32_t i = 0;
2114
2115 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
2116 if (memory_clock <=
2117 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
2118 break;
2119 }
2120 }
2121
2122 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
2123 --i;
2124
2125 tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
2126 mc_reg_table_data, smu_data->mc_reg_table.last,
2127 smu_data->mc_reg_table.validflag);
2128
2129 return 0;
2130}
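
The loop in tonga_convert_mc_reg_table_entry_to_smc() selects the first MC register set whose mclk_max covers the requested memory clock, and falls back to the last entry when none does. A minimal illustration of that selection with invented table contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mclk_max[] = { 15000, 40000, 96000 };	/* made-up mclk_max values */
	uint32_t num_entries = 3;
	uint32_t memory_clock = 50000;
	uint32_t i;

	for (i = 0; i < num_entries; i++) {
		if (memory_clock <= mclk_max[i])
			break;
	}
	if (i == num_entries && i > 0)
		--i;

	printf("memory clock %u -> entry %u (mclk_max %u)\n",
	    (unsigned)memory_clock, (unsigned)i, (unsigned)mclk_max[i]);
	return 0;
}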
2131
2132static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
2133 SMU72_Discrete_MCRegisters *mc_regs)
2134{
2135 int result = 0;
2136 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2137 int res;
2138 uint32_t i;
2139
2140 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
2141 res = tonga_convert_mc_reg_table_entry_to_smc(
2142 hwmgr,
2143 data->dpm_table.mclk_table.dpm_levels[i].value,
2144 &mc_regs->data[i]
2145 );
2146
2147 if (0 != res)
2148 result = res;
2149 }
2150
2151 return result;
2152}
2153
2154static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
2155{
2156 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2157 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2158 uint32_t address;
2159 int32_t result;
2160
2161 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK0x00000002))
2162 return 0;
2163
2164
2165 memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters))__builtin_memset((&smu_data->mc_regs), (0), (sizeof(SMU72_Discrete_MCRegisters
)))
;
2166
2167 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
2168
2169 if (result != 0)
2170 return result;
2171
2172
2173 address = smu_data->smu7_data.mc_reg_table_start +
2174 (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0])__builtin_offsetof(SMU72_Discrete_MCRegisters, data[0]);
2175
2176 return smu7_copy_bytes_to_smc(
2177 hwmgr, address,
2178 (uint8_t *)&smu_data->mc_regs.data[0],
2179 sizeof(SMU72_Discrete_MCRegisterSet) *
2180 data->dpm_table.mclk_table.count,
2181 SMC_RAM_END0x40000);
2182}
2183
2184static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
2185{
2186 int result;
2187 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2188
2189 memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters))__builtin_memset((&smu_data->mc_regs), (0x00), (sizeof
(SMU72_Discrete_MCRegisters)))
;
2190 result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
2191 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize MCRegTable for the MC register addresses !"
); return result;; } } while (0)
2192 "Failed to initialize MCRegTable for the MC register addresses !",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize MCRegTable for the MC register addresses !"
); return result;; } } while (0)
2193 return result;)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize MCRegTable for the MC register addresses !"
); return result;; } } while (0)
;
2194
2195 result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
2196 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize MCRegTable for driver state !"
); return result;; } } while (0)
2197 "Failed to initialize MCRegTable for driver state !",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize MCRegTable for driver state !"
); return result;; } } while (0)
2198 return result;)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize MCRegTable for driver state !"
); return result;; } } while (0)
;
2199
2200 return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
2201 (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END0x40000);
2202}
2203
2204static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
2205{
2206 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2207 struct phm_ppt_v1_information *table_info =
2208 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2209
2210 if (table_info &&
2211 table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX1 &&
2212 table_info->cac_dtp_table->usPowerTuneDataSetID)
2213 smu_data->power_tune_defaults =
2214 &tonga_power_tune_data_set_array
2215 [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
2216 else
2217 smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
2218}
2219
2220static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
2221{
2222 int result;
2223 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2224 struct tonga_smumgr *smu_data =
2225 (struct tonga_smumgr *)(hwmgr->smu_backend);
2226 SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
2227 struct phm_ppt_v1_information *table_info =
2228 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2229
2230 uint8_t i;
2231 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2232
2233
2234 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table))__builtin_memset((&(smu_data->smc_state_table)), (0x00
), (sizeof(smu_data->smc_state_table)))
;
2235
2236 tonga_initialize_power_tune_defaults(hwmgr);
2237
2238 if (SMU7_VOLTAGE_CONTROL_NONE0x0 != data->voltage_control)
2239 tonga_populate_smc_voltage_tables(hwmgr, table);
2240
2241 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2242 PHM_PlatformCaps_AutomaticDCTransition))
2243 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC0x01;
2244
2245
2246 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2247 PHM_PlatformCaps_StepVddc))
2248 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC0x02;
2249
2250 if (data->is_memory_gddr5)
2251 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR50x04;
2252
2253 i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN)((((((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0x96e))) & 0x1e) >> 0x1)
;
2254
2255 if (i == 1 || i == 0)
2256 table->SystemFlags |= 0x40;
2257
2258 if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
2259 result = tonga_populate_ulv_state(hwmgr, table);
2260 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ULV state !"
); return result;; } } while (0)
2261 "Failed to initialize ULV state !",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ULV state !"
); return result;; } } while (0)
2262 return result;)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ULV state !"
); return result;; } } while (0)
;
2263
2264 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc020015c,0x40035))
2265 ixCG_ULV_PARAMETER, 0x40035)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc020015c,0x40035))
;
2266 }
2267
2268 result = tonga_populate_smc_link_level(hwmgr, table);
2269 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Link Level !"
); return result; } } while (0)
2270 "Failed to initialize Link Level !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Link Level !"
); return result; } } while (0)
;
2271
2272 result = tonga_populate_all_graphic_levels(hwmgr);
2273 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Graphics Level !"
); return result; } } while (0)
2274 "Failed to initialize Graphics Level !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Graphics Level !"
); return result; } } while (0)
;
2275
2276 result = tonga_populate_all_memory_levels(hwmgr);
2277 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Memory Level !"
); return result; } } while (0)
2278 "Failed to initialize Memory Level !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Memory Level !"
); return result; } } while (0)
;
2279
2280 result = tonga_populate_smc_acpi_level(hwmgr, table);
2281 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACPI Level !"
); return result; } } while (0)
2282 "Failed to initialize ACPI Level !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACPI Level !"
); return result; } } while (0)
;
2283
2284 result = tonga_populate_smc_vce_level(hwmgr, table);
2285 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize VCE Level !"
); return result; } } while (0)
2286 "Failed to initialize VCE Level !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize VCE Level !"
); return result; } } while (0)
;
2287
2288 result = tonga_populate_smc_acp_level(hwmgr, table);
2289 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACP Level !"
); return result; } } while (0)
2290 "Failed to initialize ACP Level !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACP Level !"
); return result; } } while (0)
;
2291
2292 /* Since only the initial state is completely set up at this
2293 * point (the other states are just copies of the boot state), we only
2294 * need to populate the ARB settings for the initial state.
2295 */
2296 result = tonga_program_memory_timing_parameters(hwmgr);
2297 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to Write ARB settings for the initial state."
); return result;; } } while (0)
2298 "Failed to Write ARB settings for the initial state.",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to Write ARB settings for the initial state."
); return result;; } } while (0)
2299 return result;)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to Write ARB settings for the initial state."
); return result;; } } while (0)
;
2300
2301 result = tonga_populate_smc_uvd_level(hwmgr, table);
2302 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize UVD Level !"
); return result; } } while (0)
2303 "Failed to initialize UVD Level !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize UVD Level !"
); return result; } } while (0)
;
2304
2305 result = tonga_populate_smc_boot_level(hwmgr, table);
2306 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Boot Level !"
); return result; } } while (0)
2307 "Failed to initialize Boot Level !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Boot Level !"
); return result; } } while (0)
;
2308
2309 result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
2310 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate BAPM Parameters !"
); return result; } } while (0)
2311 "Failed to populate BAPM Parameters !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate BAPM Parameters !"
); return result; } } while (0)
;
2312
2313 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2314 PHM_PlatformCaps_ClockStretcher)) {
2315 result = tonga_populate_clock_stretcher_data_table(hwmgr);
2316 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate Clock Stretcher Data Table !"
); return result;; } } while (0)
2317 "Failed to populate Clock Stretcher Data Table !",do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate Clock Stretcher Data Table !"
); return result;; } } while (0)
2318 return result;)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate Clock Stretcher Data Table !"
); return result;; } } while (0)
;
2319 }
2320 table->GraphicsVoltageChangeEnable = 1;
2321 table->GraphicsThermThrottleEnable = 1;
2322 table->GraphicsInterval = 1;
2323 table->VoltageInterval = 1;
2324 table->ThermalInterval = 1;
2325 table->TemperatureLimitHigh =
2326 table_info->cac_dtp_table->usTargetOperatingTemp *
2327 SMU7_Q88_FORMAT_CONVERSION_UNIT256;
2328 table->TemperatureLimitLow =
2329 (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
2330 SMU7_Q88_FORMAT_CONVERSION_UNIT256;
2331 table->MemoryVoltageChangeEnable = 1;
2332 table->MemoryInterval = 1;
2333 table->VoltageResponseTime = 0;
2334 table->PhaseResponseTime = 0;
2335 table->MemoryThermThrottleEnable = 1;
2336
2337 /*
2338 * Cail reads the current link status and reports it as the cap (we cannot
2339 * change this due to some previous issues we had).
2340 * SMC drops the link status to the lowest level after DPM is enabled
2341 * by PowerPlay. After PnP or toggling CF, the driver gets reloaded again,
2342 * but this time Cail reads the current link status, which was set to low by
2343 * the SMC, and reports it as the cap to powerplay.
2344 * To avoid this, we set PCIeBootLinkLevel to the highest DPM level.
2345 */
2346 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),do { if (!((1 <= data->dpm_table.pcie_speed_table.count
))) { printk("\0014" "amdgpu: " "%s\n", "There must be 1 or more PCIE levels defined in PPTable."
); return -22; } } while (0)
2347 "There must be 1 or more PCIE levels defined in PPTable.",do { if (!((1 <= data->dpm_table.pcie_speed_table.count
))) { printk("\0014" "amdgpu: " "%s\n", "There must be 1 or more PCIE levels defined in PPTable."
); return -22; } } while (0)
2348 return -EINVAL)do { if (!((1 <= data->dpm_table.pcie_speed_table.count
))) { printk("\0014" "amdgpu: " "%s\n", "There must be 1 or more PCIE levels defined in PPTable."
); return -22; } } while (0)
;
2349
2350 table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
2351
2352 table->PCIeGenInterval = 1;
2353
2354 result = tonga_populate_vr_config(hwmgr, table);
2355 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate VRConfig setting !"
); return result; } } while (0)
2356 "Failed to populate VRConfig setting !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate VRConfig setting !"
); return result; } } while (0)
;
2357 data->vr_config = table->VRConfig;
2358 table->ThermGpio = 17;
2359 table->SclkStepSize = 0x4000;
2360
2361 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID61,
2362 &gpio_pin_assignment)) {
2363 table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2364 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2365 PHM_PlatformCaps_RegulatorHot);
2366 } else {
2367 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN0x7F;
2368 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2369 PHM_PlatformCaps_RegulatorHot);
2370 }
2371
2372 if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID60,
2373 &gpio_pin_assignment)) {
2374 table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2375 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2376 PHM_PlatformCaps_AutomaticDCTransition);
2377 } else {
2378 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN0x7F;
2379 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2380 PHM_PlatformCaps_AutomaticDCTransition);
2381 }
2382
2383 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2384 PHM_PlatformCaps_Falcon_QuickTransition);
2385
2386 if (0) {
2387 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2388 PHM_PlatformCaps_AutomaticDCTransition);
2389 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2390 PHM_PlatformCaps_Falcon_QuickTransition);
2391 }
2392
2393 if (atomctrl_get_pp_assign_pin(hwmgr,
2394 THERMAL_INT_OUTPUT_GPIO_PINID65, &gpio_pin_assignment)) {
2395 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2396 PHM_PlatformCaps_ThermalOutGPIO);
2397
2398 table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
2399
2400 table->ThermOutPolarity =
2401 (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0x183))
&
2402 (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
2403
2404 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY0x1;
2405
2406 /* If required, combine VRHot/PCC with the thermal out GPIO */
2407 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2408 PHM_PlatformCaps_RegulatorHot) &&
2409 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2410 PHM_PlatformCaps_CombinePCCWithThermalSignal)){
2411 table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT0x2;
2412 }
2413 } else {
2414 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2415 PHM_PlatformCaps_ThermalOutGPIO);
2416
2417 table->ThermOutGpio = 17;
2418 table->ThermOutPolarity = 1;
2419 table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE0x0;
2420 }
2421
2422 for (i = 0; i < SMU72_MAX_ENTRIES_SMIO32; i++)
2423 table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i])(__uint32_t)(__builtin_constant_p(table->Smio[i]) ? (__uint32_t
)(((__uint32_t)(table->Smio[i]) & 0xff) << 24 | (
(__uint32_t)(table->Smio[i]) & 0xff00) << 8 | ((
__uint32_t)(table->Smio[i]) & 0xff0000) >> 8 | (
(__uint32_t)(table->Smio[i]) & 0xff000000) >> 24
) : __swap32md(table->Smio[i]))
;
2424 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags)((table->SystemFlags) = (__uint32_t)(__builtin_constant_p(
table->SystemFlags) ? (__uint32_t)(((__uint32_t)(table->
SystemFlags) & 0xff) << 24 | ((__uint32_t)(table->
SystemFlags) & 0xff00) << 8 | ((__uint32_t)(table->
SystemFlags) & 0xff0000) >> 8 | ((__uint32_t)(table
->SystemFlags) & 0xff000000) >> 24) : __swap32md
(table->SystemFlags)))
;
2425 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig)((table->VRConfig) = (__uint32_t)(__builtin_constant_p(table
->VRConfig) ? (__uint32_t)(((__uint32_t)(table->VRConfig
) & 0xff) << 24 | ((__uint32_t)(table->VRConfig)
& 0xff00) << 8 | ((__uint32_t)(table->VRConfig)
& 0xff0000) >> 8 | ((__uint32_t)(table->VRConfig
) & 0xff000000) >> 24) : __swap32md(table->VRConfig
)))
;
2426 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1)((table->SmioMask1) = (__uint32_t)(__builtin_constant_p(table
->SmioMask1) ? (__uint32_t)(((__uint32_t)(table->SmioMask1
) & 0xff) << 24 | ((__uint32_t)(table->SmioMask1
) & 0xff00) << 8 | ((__uint32_t)(table->SmioMask1
) & 0xff0000) >> 8 | ((__uint32_t)(table->SmioMask1
) & 0xff000000) >> 24) : __swap32md(table->SmioMask1
)))
;
2427 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2)((table->SmioMask2) = (__uint32_t)(__builtin_constant_p(table
->SmioMask2) ? (__uint32_t)(((__uint32_t)(table->SmioMask2
) & 0xff) << 24 | ((__uint32_t)(table->SmioMask2
) & 0xff00) << 8 | ((__uint32_t)(table->SmioMask2
) & 0xff0000) >> 8 | ((__uint32_t)(table->SmioMask2
) & 0xff000000) >> 24) : __swap32md(table->SmioMask2
)))
;
2428 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize)((table->SclkStepSize) = (__uint32_t)(__builtin_constant_p
(table->SclkStepSize) ? (__uint32_t)(((__uint32_t)(table->
SclkStepSize) & 0xff) << 24 | ((__uint32_t)(table->
SclkStepSize) & 0xff00) << 8 | ((__uint32_t)(table->
SclkStepSize) & 0xff0000) >> 8 | ((__uint32_t)(table
->SclkStepSize) & 0xff000000) >> 24) : __swap32md
(table->SclkStepSize)))
;
2429 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh)((table->TemperatureLimitHigh) = (__uint16_t)(__builtin_constant_p
(table->TemperatureLimitHigh) ? (__uint16_t)(((__uint16_t)
(table->TemperatureLimitHigh) & 0xffU) << 8 | ((
__uint16_t)(table->TemperatureLimitHigh) & 0xff00U) >>
8) : __swap16md(table->TemperatureLimitHigh)))
;
2430 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow)((table->TemperatureLimitLow) = (__uint16_t)(__builtin_constant_p
(table->TemperatureLimitLow) ? (__uint16_t)(((__uint16_t)(
table->TemperatureLimitLow) & 0xffU) << 8 | ((__uint16_t
)(table->TemperatureLimitLow) & 0xff00U) >> 8) :
__swap16md(table->TemperatureLimitLow)))
;
2431 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime)((table->VoltageResponseTime) = (__uint16_t)(__builtin_constant_p
(table->VoltageResponseTime) ? (__uint16_t)(((__uint16_t)(
table->VoltageResponseTime) & 0xffU) << 8 | ((__uint16_t
)(table->VoltageResponseTime) & 0xff00U) >> 8) :
__swap16md(table->VoltageResponseTime)))
;
2432 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime)((table->PhaseResponseTime) = (__uint16_t)(__builtin_constant_p
(table->PhaseResponseTime) ? (__uint16_t)(((__uint16_t)(table
->PhaseResponseTime) & 0xffU) << 8 | ((__uint16_t
)(table->PhaseResponseTime) & 0xff00U) >> 8) : __swap16md
(table->PhaseResponseTime)))
;
2433
2434 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
2435 result = smu7_copy_bytes_to_smc(
2436 hwmgr,
2437 smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags)__builtin_offsetof(SMU72_Discrete_DpmTable, SystemFlags),
2438 (uint8_t *)&(table->SystemFlags),
2439 sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
2440 SMC_RAM_END0x40000);
2441
2442 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload dpm data to SMC memory !"
); return result;; } } while (0)
2443 "Failed to upload dpm data to SMC memory !", return result;)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload dpm data to SMC memory !"
); return result;; } } while (0)
;
2444
2445 result = tonga_init_arb_table_index(hwmgr);
2446 PP_ASSERT_WITH_CODE(!result,do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload arb data to SMC memory !"
); return result; } } while (0)
2447 "Failed to upload arb data to SMC memory !", return result)do { if (!(!result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload arb data to SMC memory !"
); return result; } } while (0)
;
2448
2449 tonga_populate_pm_fuses(hwmgr);
2450 PP_ASSERT_WITH_CODE((!result),do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate initialize pm fuses !"
); return result; } } while (0)
2451 "Failed to populate initialize pm fuses !", return result)do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate initialize pm fuses !"
); return result; } } while (0)
;
2452
2453 result = tonga_populate_initial_mc_reg_table(hwmgr);
2454 PP_ASSERT_WITH_CODE((!result),do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate initialize MC Reg table !"
); return result; } } while (0)
2455 "Failed to populate initialize MC Reg table !", return result)do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate initialize MC Reg table !"
); return result; } } while (0)
;
2456
2457 return 0;
2458}
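Note on the function above: at line 2449 tonga_populate_pm_fuses(hwmgr) is called without capturing its return value, yet the PP_ASSERT_WITH_CODE that follows re-tests the stale result left over from the tonga_init_arb_table_index() call, so a failure inside tonga_populate_pm_fuses() would go unreported here. The CONVERT_FROM_HOST_TO_SMC_UL expansions in this function all reduce to a 32-bit byte swap; the following is a standalone sketch of that swap in plain C (not the kernel macro itself), assuming only the standard headers shown.

#include <stdint.h>
#include <stdio.h>

static uint32_t host_to_smc_ul(uint32_t v)
{
        return (v & 0x000000ffu) << 24 |
               (v & 0x0000ff00u) << 8  |
               (v & 0x00ff0000u) >> 8  |
               (v & 0xff000000u) >> 24;
}

int main(void)
{
        printf("0x%08x\n", host_to_smc_ul(0x12345678u)); /* prints 0x78563412 */
        return 0;
}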
2459
2460static int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2461{
2462 struct tonga_smumgr *smu_data =
2463 (struct tonga_smumgr *)(hwmgr->smu_backend);
2464 SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE0 };
2465 uint32_t duty100;
2466 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2467 uint16_t fdo_min, slope1, slope2;
2468 uint32_t reference_clock;
2469 int res;
2470 uint64_t tmp64;
2471
2472 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2473 PHM_PlatformCaps_MicrocodeFanControl))
2474 return 0;
2475
2476 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2477 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2478 PHM_PlatformCaps_MicrocodeFanControl);
2479 return 0;
2480 }
2481
2482 if (0 == smu_data->smu7_data.fan_table_start) {
2483 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2484 PHM_PlatformCaps_MicrocodeFanControl);
2485 return 0;
2486 }
2487
2488 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0300068))) & 0xff) >>
0x0)
2489 CGS_IND_REG__SMC,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0300068))) & 0xff) >>
0x0)
2490 CG_FDO_CTRL1, FMAX_DUTY100)((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0300068))) & 0xff) >>
0x0)
;
2491
2492 if (0 == duty100) {
2493 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2494 PHM_PlatformCaps_MicrocodeFanControl);
2495 return 0;
2496 }
2497
2498 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2499 do_div(tmp64, 10000)({ uint32_t __base = (10000); uint32_t __rem = ((uint64_t)(tmp64
)) % __base; (tmp64) = ((uint64_t)(tmp64)) / __base; __rem; }
)
;
2500 fdo_min = (uint16_t)tmp64;
2501
2502 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
2503 hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2504 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
2505 hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2506
2507 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
2508 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2509 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
2510 hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2511
2512 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2513 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2514
2515 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100)(__uint16_t)(__builtin_constant_p((50 + hwmgr->thermal_controller
.advanceFanControlParameters.usTMin) / 100) ? (__uint16_t)(((
__uint16_t)((50 + hwmgr->thermal_controller.advanceFanControlParameters
.usTMin) / 100) & 0xffU) << 8 | ((__uint16_t)((50 +
hwmgr->thermal_controller.advanceFanControlParameters.usTMin
) / 100) & 0xff00U) >> 8) : __swap16md((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMin) / 100)
)
;
2516 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100)(__uint16_t)(__builtin_constant_p((50 + hwmgr->thermal_controller
.advanceFanControlParameters.usTMed) / 100) ? (__uint16_t)(((
__uint16_t)((50 + hwmgr->thermal_controller.advanceFanControlParameters
.usTMed) / 100) & 0xffU) << 8 | ((__uint16_t)((50 +
hwmgr->thermal_controller.advanceFanControlParameters.usTMed
) / 100) & 0xff00U) >> 8) : __swap16md((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMed) / 100)
)
;
2517 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100)(__uint16_t)(__builtin_constant_p((50 + hwmgr->thermal_controller
.advanceFanControlParameters.usTMax) / 100) ? (__uint16_t)(((
__uint16_t)((50 + hwmgr->thermal_controller.advanceFanControlParameters
.usTMax) / 100) & 0xffU) << 8 | ((__uint16_t)((50 +
hwmgr->thermal_controller.advanceFanControlParameters.usTMax
) / 100) & 0xff00U) >> 8) : __swap16md((50 + hwmgr->
thermal_controller.advanceFanControlParameters.usTMax) / 100)
)
;
2518
2519 fan_table.Slope1 = cpu_to_be16(slope1)(__uint16_t)(__builtin_constant_p(slope1) ? (__uint16_t)(((__uint16_t
)(slope1) & 0xffU) << 8 | ((__uint16_t)(slope1) &
0xff00U) >> 8) : __swap16md(slope1))
;
2520 fan_table.Slope2 = cpu_to_be16(slope2)(__uint16_t)(__builtin_constant_p(slope2) ? (__uint16_t)(((__uint16_t
)(slope2) & 0xffU) << 8 | ((__uint16_t)(slope2) &
0xff00U) >> 8) : __swap16md(slope2))
;
2521
2522 fan_table.FdoMin = cpu_to_be16(fdo_min)(__uint16_t)(__builtin_constant_p(fdo_min) ? (__uint16_t)(((__uint16_t
)(fdo_min) & 0xffU) << 8 | ((__uint16_t)(fdo_min) &
0xff00U) >> 8) : __swap16md(fdo_min))
;
2523
2524 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst)(__uint16_t)(__builtin_constant_p(hwmgr->thermal_controller
.advanceFanControlParameters.ucTHyst) ? (__uint16_t)(((__uint16_t
)(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst
) & 0xffU) << 8 | ((__uint16_t)(hwmgr->thermal_controller
.advanceFanControlParameters.ucTHyst) & 0xff00U) >>
8) : __swap16md(hwmgr->thermal_controller.advanceFanControlParameters
.ucTHyst))
;
2525
2526 fan_table.HystUp = cpu_to_be16(1)(__uint16_t)(__builtin_constant_p(1) ? (__uint16_t)(((__uint16_t
)(1) & 0xffU) << 8 | ((__uint16_t)(1) & 0xff00U
) >> 8) : __swap16md(1))
;
2527
2528 fan_table.HystSlope = cpu_to_be16(1)(__uint16_t)(__builtin_constant_p(1) ? (__uint16_t)(((__uint16_t
)(1) & 0xffU) << 8 | ((__uint16_t)(1) & 0xff00U
) >> 8) : __swap16md(1))
;
2529
2530 fan_table.TempRespLim = cpu_to_be16(5)(__uint16_t)(__builtin_constant_p(5) ? (__uint16_t)(((__uint16_t
)(5) & 0xffU) << 8 | ((__uint16_t)(5) & 0xff00U
) >> 8) : __swap16md(5))
;
2531
2532 reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev)((struct amdgpu_device *)hwmgr->adev)->asic_funcs->get_xclk
(((struct amdgpu_device *)hwmgr->adev))
;
2533
2534 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600)(__uint32_t)(__builtin_constant_p((hwmgr->thermal_controller
.advanceFanControlParameters.ulCycleDelay * reference_clock) /
1600) ? (__uint32_t)(((__uint32_t)((hwmgr->thermal_controller
.advanceFanControlParameters.ulCycleDelay * reference_clock) /
1600) & 0xff) << 24 | ((__uint32_t)((hwmgr->thermal_controller
.advanceFanControlParameters.ulCycleDelay * reference_clock) /
1600) & 0xff00) << 8 | ((__uint32_t)((hwmgr->thermal_controller
.advanceFanControlParameters.ulCycleDelay * reference_clock) /
1600) & 0xff0000) >> 8 | ((__uint32_t)((hwmgr->
thermal_controller.advanceFanControlParameters.ulCycleDelay *
reference_clock) / 1600) & 0xff000000) >> 24) : __swap32md
((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
* reference_clock) / 1600))
;
2535
2536 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100)(__uint16_t)(__builtin_constant_p((uint16_t)duty100) ? (__uint16_t
)(((__uint16_t)((uint16_t)duty100) & 0xffU) << 8 | (
(__uint16_t)((uint16_t)duty100) & 0xff00U) >> 8) : __swap16md
((uint16_t)duty100))
;
2537
2538 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL)((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0xc0300010))) & 0xff00000
) >> 0x14)
;
2539
2540 fan_table.FanControl_GL_Flag = 1;
2541
2542 res = smu7_copy_bytes_to_smc(hwmgr,
2543 smu_data->smu7_data.fan_table_start,
2544 (uint8_t *)&fan_table,
2545 (uint32_t)sizeof(fan_table),
2546 SMC_RAM_END0x40000);
2547
2548 return 0;
2549}
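In tonga_thermal_setup_fan_table() above, the value stored to res at line 2542 is never read: the function returns 0 unconditionally at line 2548, so a failed smu7_copy_bytes_to_smc() of the fan table is silently ignored. The Slope1/Slope2 fields are computed in fixed point as (50 + (16 * duty100 * pwm_diff) / t_diff) / 100, i.e. a PWM-per-degree slope scaled by 16/100 with the +50 term rounding the final division. A worked example with purely illustrative input values (not taken from any real board):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t duty100   = 255;  /* FMAX_DUTY100 as read from CG_FDO_CTRL1 */
        uint32_t t_diff1   = 20;   /* usTMed - usTMin, degrees C             */
        uint32_t pwm_diff1 = 30;   /* usPWMMed - usPWMMin, percent           */

        /* same expression as the driver: +50 rounds the /100 scaling */
        uint16_t slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);

        printf("slope1 = %u\n", (unsigned)slope1);  /* 61 for these inputs */
        return 0;
}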
2550
2551
2552static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2553{
2554 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2555
2556 if (data->need_update_smu7_dpm_table &
2557 (DPMTABLE_OD_UPDATE_SCLK0x00000001 + DPMTABLE_OD_UPDATE_MCLK0x00000002))
2558 return tonga_program_memory_timing_parameters(hwmgr);
2559
2560 return 0;
2561}
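tonga_program_mem_timing_parameters() masks need_update_smu7_dpm_table with DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK; since the two flags are distinct single bits (0x1 and 0x2), the sum equals the OR of the masks and the test behaves as an ordinary flag check. A minimal standalone sketch of the equivalent test (the helper name is illustrative):

#include <stdint.h>
#include <stdbool.h>

#define DPMTABLE_OD_UPDATE_SCLK 0x00000001u
#define DPMTABLE_OD_UPDATE_MCLK 0x00000002u

static bool needs_mem_timing_update(uint32_t need_update_smu7_dpm_table)
{
        return (need_update_smu7_dpm_table &
                (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK)) != 0;
}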
2562
2563static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2564{
2565 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2566 struct tonga_smumgr *smu_data =
2567 (struct tonga_smumgr *)(hwmgr->smu_backend);
2568
2569 int result = 0;
2570 uint32_t low_sclk_interrupt_threshold = 0;
2571
2572 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2573 PHM_PlatformCaps_SclkThrottleLowNotification)
2574 && (data->low_sclk_interrupt_threshold != 0)) {
2575 low_sclk_interrupt_threshold =
2576 data->low_sclk_interrupt_threshold;
2577
2578 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold)((low_sclk_interrupt_threshold) = (__uint32_t)(__builtin_constant_p
(low_sclk_interrupt_threshold) ? (__uint32_t)(((__uint32_t)(low_sclk_interrupt_threshold
) & 0xff) << 24 | ((__uint32_t)(low_sclk_interrupt_threshold
) & 0xff00) << 8 | ((__uint32_t)(low_sclk_interrupt_threshold
) & 0xff0000) >> 8 | ((__uint32_t)(low_sclk_interrupt_threshold
) & 0xff000000) >> 24) : __swap32md(low_sclk_interrupt_threshold
)))
;
2579
2580 result = smu7_copy_bytes_to_smc(
2581 hwmgr,
2582 smu_data->smu7_data.dpm_table_start +
2583 offsetof(SMU72_Discrete_DpmTable,__builtin_offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold
)
2584 LowSclkInterruptThreshold)__builtin_offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold
)
,
2585 (uint8_t *)&low_sclk_interrupt_threshold,
2586 sizeof(uint32_t),
2587 SMC_RAM_END0x40000);
2588 }
2589
2590 result = tonga_update_and_upload_mc_reg_table(hwmgr);
2591
2592 PP_ASSERT_WITH_CODE((!result),do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload MC reg table !"
); return result; } } while (0)
2593 "Failed to upload MC reg table !",do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload MC reg table !"
); return result; } } while (0)
2594 return result)do { if (!((!result))) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload MC reg table !"
); return result; } } while (0)
;
2595
2596 result = tonga_program_mem_timing_parameters(hwmgr);
2597 PP_ASSERT_WITH_CODE((result == 0),do { if (!((result == 0))) { printk("\0014" "amdgpu: " "%s\n"
, "Failed to program memory timing parameters !"); ; } } while
(0)
2598 "Failed to program memory timing parameters !",do { if (!((result == 0))) { printk("\0014" "amdgpu: " "%s\n"
, "Failed to program memory timing parameters !"); ; } } while
(0)
2599 )do { if (!((result == 0))) { printk("\0014" "amdgpu: " "%s\n"
, "Failed to program memory timing parameters !"); ; } } while
(0)
;
2600
2601 return result;
2602}
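The PP_ASSERT_WITH_CODE expansions used throughout this file follow the do { if (!(cond)) { printk(...); action; } } while (0) shape, where the third argument is an arbitrary statement such as "return result". The following is a user-space sketch of the same pattern, with fprintf standing in for printk and an illustrative macro name; it is not the kernel definition.

#include <stdio.h>

#define ASSERT_WITH_CODE(cond, msg, action)              \
        do {                                             \
                if (!(cond)) {                           \
                        fprintf(stderr, "%s\n", (msg));  \
                        action;                          \
                }                                        \
        } while (0)

static int upload_step(int result)
{
        ASSERT_WITH_CODE(result == 0, "upload failed", return result);
        return 0;
}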
2603
2604static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
2605{
2606 switch (type) {
2607 case SMU_SoftRegisters:
2608 switch (member) {
2609 case HandshakeDisables:
2610 return offsetof(SMU72_SoftRegisters, HandshakeDisables)__builtin_offsetof(SMU72_SoftRegisters, HandshakeDisables);
2611 case VoltageChangeTimeout:
2612 return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout)__builtin_offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
2613 case AverageGraphicsActivity:
2614 return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity)__builtin_offsetof(SMU72_SoftRegisters, AverageGraphicsActivity
)
;
2615 case AverageMemoryActivity:
2616 return offsetof(SMU72_SoftRegisters, AverageMemoryActivity)__builtin_offsetof(SMU72_SoftRegisters, AverageMemoryActivity
)
;
2617 case PreVBlankGap:
2618 return offsetof(SMU72_SoftRegisters, PreVBlankGap)__builtin_offsetof(SMU72_SoftRegisters, PreVBlankGap);
2619 case VBlankTimeout:
2620 return offsetof(SMU72_SoftRegisters, VBlankTimeout)__builtin_offsetof(SMU72_SoftRegisters, VBlankTimeout);
2621 case UcodeLoadStatus:
2622 return offsetof(SMU72_SoftRegisters, UcodeLoadStatus)__builtin_offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
2623 case DRAM_LOG_ADDR_H:
2624 return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_H)__builtin_offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_H);
2625 case DRAM_LOG_ADDR_L:
2626 return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_L)__builtin_offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_L);
2627 case DRAM_LOG_PHY_ADDR_H:
2628 return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_H)__builtin_offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2629 case DRAM_LOG_PHY_ADDR_L:
2630 return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_L)__builtin_offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2631 case DRAM_LOG_BUFF_SIZE:
2632 return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE)__builtin_offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2633 }
2634 break;
2635 case SMU_Discrete_DpmTable:
2636 switch (member) {
2637 case UvdBootLevel:
2638 return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel)__builtin_offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
2639 case VceBootLevel:
2640 return offsetof(SMU72_Discrete_DpmTable, VceBootLevel)__builtin_offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
2641 case LowSclkInterruptThreshold:
2642 return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold)__builtin_offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold
)
;
2643 }
2644 break;
2645 }
2646 pr_warn("can't get the offset of type %x member %x\n", type, member)printk("\0014" "amdgpu: " "can't get the offset of type %x member %x\n"
, type, member)
;
2647 return 0;
2648}
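tonga_get_offsetof() maps an abstract (type, member) pair to a byte offset inside the SMU72 firmware structures via offsetof, and falls back to a warning plus 0 for unknown members. A minimal illustration of that lookup against a made-up structure (not the real SMU72 layouts):

#include <stddef.h>
#include <stdint.h>

struct example_soft_registers {
        uint32_t HandshakeDisables;
        uint32_t VoltageChangeTimeout;
};

static uint32_t example_get_offsetof(uint32_t member)
{
        switch (member) {
        case 0:
                return offsetof(struct example_soft_registers, HandshakeDisables);
        case 1:
                return offsetof(struct example_soft_registers, VoltageChangeTimeout);
        }
        return 0;   /* unknown member */
}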
2649
2650static uint32_t tonga_get_mac_definition(uint32_t value)
2651{
2652 switch (value) {
2653 case SMU_MAX_LEVELS_GRAPHICS:
2654 return SMU72_MAX_LEVELS_GRAPHICS8;
2655 case SMU_MAX_LEVELS_MEMORY:
2656 return SMU72_MAX_LEVELS_MEMORY4;
2657 case SMU_MAX_LEVELS_LINK:
2658 return SMU72_MAX_LEVELS_LINK8;
2659 case SMU_MAX_ENTRIES_SMIO:
2660 return SMU72_MAX_ENTRIES_SMIO32;
2661 case SMU_MAX_LEVELS_VDDC:
2662 return SMU72_MAX_LEVELS_VDDC16;
2663 case SMU_MAX_LEVELS_VDDGFX:
2664 return SMU72_MAX_LEVELS_VDDGFX16;
2665 case SMU_MAX_LEVELS_VDDCI:
2666 return SMU72_MAX_LEVELS_VDDCI8;
2667 case SMU_MAX_LEVELS_MVDD:
2668 return SMU72_MAX_LEVELS_MVDD4;
2669 }
2670 pr_warn("can't get the mac value %x\n", value)printk("\0014" "amdgpu: " "can't get the mac value %x\n", value
)
;
2671
2672 return 0;
2673}
2674
2675static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2676{
2677 struct tonga_smumgr *smu_data =
2678 (struct tonga_smumgr *)(hwmgr->smu_backend);
2679 uint32_t mm_boot_level_offset, mm_boot_level_value;
2680 struct phm_ppt_v1_information *table_info =
2681 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2682
2683 smu_data->smc_state_table.UvdBootLevel = 0;
2684 if (table_info->mm_dep_table->count > 0)
2685 smu_data->smc_state_table.UvdBootLevel =
2686 (uint8_t) (table_info->mm_dep_table->count - 1);
2687 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2688 offsetof(SMU72_Discrete_DpmTable, UvdBootLevel)__builtin_offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
2689 mm_boot_level_offset /= 4;
2690 mm_boot_level_offset *= 4;
2691 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,mm_boot_level_offset))
2692 CGS_IND_REG__SMC, mm_boot_level_offset)(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,mm_boot_level_offset))
;
2693 mm_boot_level_value &= 0x00FFFFFF;
2694 mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
2695 cgs_write_ind_register(hwmgr->device,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,mm_boot_level_offset,mm_boot_level_value
))
2696 CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,mm_boot_level_offset,mm_boot_level_value
))
2697 mm_boot_level_offset, mm_boot_level_value)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,mm_boot_level_offset,mm_boot_level_value
))
;
2698
2699 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2700 PHM_PlatformCaps_UVDDPM) ||
2701 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2702 PHM_PlatformCaps_StablePState))
2703 smum_send_msg_to_smc_with_parameter(hwmgr,
2704 PPSMC_MSG_UVDDPM_SetEnabledMask((uint16_t) 0x12D),
2705 (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
2706 NULL((void *)0));
2707 return 0;
2708}
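tonga_update_uvd_smc_table() patches only the top byte of the dword holding UvdBootLevel: the byte offset is rounded down to a 4-byte boundary (/= 4; *= 4), the dword is read through the SMC indirect space, bits 31:24 are cleared with the 0x00FFFFFF mask, and the new boot level is OR-ed into the top byte before writing back. A standalone sketch of that read-modify-write on the dword value itself:

#include <stdint.h>

static uint32_t set_uvd_boot_level(uint32_t dword, uint8_t boot_level)
{
        dword &= 0x00FFFFFFu;                /* clear bits 31:24         */
        dword |= (uint32_t)boot_level << 24; /* place the new boot level */
        return dword;
}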
2709
2710static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2711{
2712 struct tonga_smumgr *smu_data =
2713 (struct tonga_smumgr *)(hwmgr->smu_backend);
2714 uint32_t mm_boot_level_offset, mm_boot_level_value;
2715 struct phm_ppt_v1_information *table_info =
2716 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2717
2718
2719 smu_data->smc_state_table.VceBootLevel =
2720 (uint8_t) (table_info->mm_dep_table->count - 1);
2721
2722 mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
2723 offsetof(SMU72_Discrete_DpmTable, VceBootLevel)__builtin_offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
2724 mm_boot_level_offset /= 4;
2725 mm_boot_level_offset *= 4;
2726 mm_boot_level_value = cgs_read_ind_register(hwmgr->device,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,mm_boot_level_offset))
2727 CGS_IND_REG__SMC, mm_boot_level_offset)(((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,mm_boot_level_offset))
;
2728 mm_boot_level_value &= 0xFF00FFFF;
2729 mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
2730 cgs_write_ind_register(hwmgr->device,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,mm_boot_level_offset,mm_boot_level_value
))
2731 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,mm_boot_level_offset,mm_boot_level_value
))
;
2732
2733 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2734 PHM_PlatformCaps_StablePState))
2735 smum_send_msg_to_smc_with_parameter(hwmgr,
2736 PPSMC_MSG_VCEDPM_SetEnabledMask((uint16_t) 0x12E),
2737 (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
2738 NULL((void *)0));
2739 return 0;
2740}
2741
2742static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2743{
2744 switch (type) {
2745 case SMU_UVD_TABLE:
2746 tonga_update_uvd_smc_table(hwmgr);
2747 break;
2748 case SMU_VCE_TABLE:
2749 tonga_update_vce_smc_table(hwmgr);
2750 break;
2751 default:
2752 break;
2753 }
2754 return 0;
2755}
2756
2757static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
2758{
2759 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2760 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
2761
2762 uint32_t tmp;
2763 int result;
2764 bool_Bool error = false0;
2765
2766 result = smu7_read_smc_sram_dword(hwmgr,
2767 SMU72_FIRMWARE_HEADER_LOCATION0x20000 +
2768 offsetof(SMU72_Firmware_Header, DpmTable)__builtin_offsetof(SMU72_Firmware_Header, DpmTable),
2769 &tmp, SMC_RAM_END0x40000);
2770
2771 if (!result)
2772 smu_data->smu7_data.dpm_table_start = tmp;
2773
2774 error |= (result != 0);
2775
2776 result = smu7_read_smc_sram_dword(hwmgr,
2777 SMU72_FIRMWARE_HEADER_LOCATION0x20000 +
2778 offsetof(SMU72_Firmware_Header, SoftRegisters)__builtin_offsetof(SMU72_Firmware_Header, SoftRegisters),
2779 &tmp, SMC_RAM_END0x40000);
2780
2781 if (!result) {
2782 data->soft_regs_start = tmp;
2783 smu_data->smu7_data.soft_regs_start = tmp;
2784 }
2785
2786 error |= (result != 0);
2787
2788
2789 result = smu7_read_smc_sram_dword(hwmgr,
2790 SMU72_FIRMWARE_HEADER_LOCATION0x20000 +
2791 offsetof(SMU72_Firmware_Header, mcRegisterTable)__builtin_offsetof(SMU72_Firmware_Header, mcRegisterTable),
2792 &tmp, SMC_RAM_END0x40000);
2793
2794 if (!result)
2795 smu_data->smu7_data.mc_reg_table_start = tmp;
2796
2797 result = smu7_read_smc_sram_dword(hwmgr,
2798 SMU72_FIRMWARE_HEADER_LOCATION0x20000 +
2799 offsetof(SMU72_Firmware_Header, FanTable)__builtin_offsetof(SMU72_Firmware_Header, FanTable),
2800 &tmp, SMC_RAM_END0x40000);
2801
2802 if (!result)
2803 smu_data->smu7_data.fan_table_start = tmp;
2804
2805 error |= (result != 0);
2806
2807 result = smu7_read_smc_sram_dword(hwmgr,
2808 SMU72_FIRMWARE_HEADER_LOCATION0x20000 +
2809 offsetof(SMU72_Firmware_Header, mcArbDramTimingTable)__builtin_offsetof(SMU72_Firmware_Header, mcArbDramTimingTable
)
,
2810 &tmp, SMC_RAM_END0x40000);
2811
2812 if (!result)
2813 smu_data->smu7_data.arb_table_start = tmp;
2814
2815 error |= (result != 0);
2816
2817 result = smu7_read_smc_sram_dword(hwmgr,
2818 SMU72_FIRMWARE_HEADER_LOCATION0x20000 +
2819 offsetof(SMU72_Firmware_Header, Version)__builtin_offsetof(SMU72_Firmware_Header, Version),
2820 &tmp, SMC_RAM_END0x40000);
2821
2822 if (!result)
2823 hwmgr->microcode_version_info.SMC = tmp;
2824
2825 error |= (result != 0);
2826
2827 return error ? 1 : 0;
2828}
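tonga_process_firmware_header() folds each SRAM read into a single flag with error |= (result != 0) and returns 1 if any recorded read failed; note that no such fold follows the mcRegisterTable read at line 2789, so a failure of that read alone would not be reported. A user-space sketch of the accumulation idiom, with a trivial stand-in for smu7_read_smc_sram_dword() and illustrative offsets:

#include <stdbool.h>
#include <stdint.h>

/* stand-in for smu7_read_smc_sram_dword(): always succeeds here */
static int read_word(uint32_t addr, uint32_t *out)
{
        *out = addr;
        return 0;
}

static int process_header_sketch(uint32_t base, uint32_t *dpm_start,
                                 uint32_t *fan_start)
{
        bool error = false;
        uint32_t tmp;
        int result;

        result = read_word(base + 0x94, &tmp);   /* offsets illustrative */
        if (!result)
                *dpm_start = tmp;
        error |= (result != 0);

        result = read_word(base + 0x9c, &tmp);
        if (!result)
                *fan_start = tmp;
        error |= (result != 0);

        return error ? 1 : 0;
}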
2829
2830/*---------------------------MC----------------------------*/
2831
2832static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2833{
2834 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0x5cd))
>> 16));
2835}
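tonga_get_memory_modile_index() returns bits 23:16 of the BIOS_SCRATCH_4 register (mmBIOS_SCRATCH_4 = 0x5cd in the expansion above). A standalone equivalent of that extraction:

#include <stdint.h>

static uint8_t memory_module_index(uint32_t bios_scratch_4)
{
        return (uint8_t)(0xFF & (bios_scratch_4 >> 16));
}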
2836
2837static bool_Bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2838{
2839 bool_Bool result = true1;
2840
2841 switch (in_reg) {
2842 case mmMC_SEQ_RAS_TIMING0xa28:
2843 *out_reg = mmMC_SEQ_RAS_TIMING_LP0xa9b;
2844 break;
2845
2846 case mmMC_SEQ_DLL_STBY0xd8e:
2847 *out_reg = mmMC_SEQ_DLL_STBY_LP0xd8f;
2848 break;
2849
2850 case mmMC_SEQ_G5PDX_CMD00xd83:
2851 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP0xd84;
2852 break;
2853
2854 case mmMC_SEQ_G5PDX_CMD10xd85:
2855 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP0xd86;
2856 break;
2857
2858 case mmMC_SEQ_G5PDX_CTRL0xd81:
2859 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP0xd82;
2860 break;
2861
2862 case mmMC_SEQ_CAS_TIMING0xa29:
2863 *out_reg = mmMC_SEQ_CAS_TIMING_LP0xa9c;
2864 break;
2865
2866 case mmMC_SEQ_MISC_TIMING0xa2a:
2867 *out_reg = mmMC_SEQ_MISC_TIMING_LP0xa9d;
2868 break;
2869
2870 case mmMC_SEQ_MISC_TIMING20xa2b:
2871 *out_reg = mmMC_SEQ_MISC_TIMING2_LP0xa9e;
2872 break;
2873
2874 case mmMC_SEQ_PMG_DVS_CMD0xd8c:
2875 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP0xd8d;
2876 break;
2877
2878 case mmMC_SEQ_PMG_DVS_CTL0xd8a:
2879 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP0xd8b;
2880 break;
2881
2882 case mmMC_SEQ_RD_CTL_D00xa2d:
2883 *out_reg = mmMC_SEQ_RD_CTL_D0_LP0xac7;
2884 break;
2885
2886 case mmMC_SEQ_RD_CTL_D10xa2e:
2887 *out_reg = mmMC_SEQ_RD_CTL_D1_LP0xac8;
2888 break;
2889
2890 case mmMC_SEQ_WR_CTL_D00xa2f:
2891 *out_reg = mmMC_SEQ_WR_CTL_D0_LP0xa9f;
2892 break;
2893
2894 case mmMC_SEQ_WR_CTL_D10xa30:
2895 *out_reg = mmMC_SEQ_WR_CTL_D1_LP0xaa0;
2896 break;
2897
2898 case mmMC_PMG_CMD_EMRS0xa83:
2899 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP0xaa1;
2900 break;
2901
2902 case mmMC_PMG_CMD_MRS0xaab:
2903 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP0xaa2;
2904 break;
2905
2906 case mmMC_PMG_CMD_MRS10xad1:
2907 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP0xad2;
2908 break;
2909
2910 case mmMC_SEQ_PMG_TIMING0xa2c:
2911 *out_reg = mmMC_SEQ_PMG_TIMING_LP0xad3;
2912 break;
2913
2914 case mmMC_PMG_CMD_MRS20xad7:
2915 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP0xad8;
2916 break;
2917
2918 case mmMC_SEQ_WR_CTL_20xad5:
2919 *out_reg = mmMC_SEQ_WR_CTL_2_LP0xad6;
2920 break;
2921
2922 default:
2923 result = false0;
2924 break;
2925 }
2926
2927 return result;
2928}
2929
2930static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
2931{
2932 uint32_t i;
2933 uint16_t address;
2934
2935 for (i = 0; i < table->last; i++) {
2936 table->mc_reg_address[i].s0 =
2937 tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
2938 &address) ?
2939 address :
2940 table->mc_reg_address[i].s1;
2941 }
2942 return 0;
2943}
2944
2945static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2946 struct tonga_mc_reg_table *ni_table)
2947{
2948 uint8_t i, j;
2949
2950 PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),do { if (!((table->last <= 16))) { printk("\0014" "amdgpu: "
"%s\n", "Invalid VramInfo table."); return -22; } } while (0
)
2951 "Invalid VramInfo table.", return -EINVAL)do { if (!((table->last <= 16))) { printk("\0014" "amdgpu: "
"%s\n", "Invalid VramInfo table."); return -22; } } while (0
)
;
2952 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),do { if (!((table->num_entries <= 16))) { printk("\0014"
"amdgpu: " "%s\n", "Invalid VramInfo table."); return -22; }
} while (0)
2953 "Invalid VramInfo table.", return -EINVAL)do { if (!((table->num_entries <= 16))) { printk("\0014"
"amdgpu: " "%s\n", "Invalid VramInfo table."); return -22; }
} while (0)
;
2954
2955 for (i = 0; i < table->last; i++)
2956 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2957
2958 ni_table->last = table->last;
2959
2960 for (i = 0; i < table->num_entries; i++) {
2961 ni_table->mc_reg_table_entry[i].mclk_max =
2962 table->mc_reg_table_entry[i].mclk_max;
2963 for (j = 0; j < table->last; j++) {
2964 ni_table->mc_reg_table_entry[i].mc_data[j] =
2965 table->mc_reg_table_entry[i].mc_data[j];
2966 }
2967 }
2968
2969 ni_table->num_entries = table->num_entries;
2970
2971 return 0;
2972}
2973
2974static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2975 struct tonga_mc_reg_table *table)
2976{
2977 uint8_t i, j, k;
2978 uint32_t temp_reg;
2979 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2980
2981 for (i = 0, j = table->last; i < table->last; i++) {
2982 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),do { if (!((j < 16))) { printk("\0014" "amdgpu: " "%s\n", "Invalid VramInfo table."
); return -22; } } while (0)
2983 "Invalid VramInfo table.", return -EINVAL)do { if (!((j < 16))) { printk("\0014" "amdgpu: " "%s\n", "Invalid VramInfo table."
); return -22; } } while (0)
;
2984
2985 switch (table->mc_reg_address[i].s1) {
2986
2987 case mmMC_SEQ_MISC10xa81:
2988 temp_reg = cgs_read_register(hwmgr->device,(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0xa83))
2989 mmMC_PMG_CMD_EMRS)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0xa83))
;
2990 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS0xa83;
2991 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP0xaa1;
2992 for (k = 0; k < table->num_entries; k++) {
2993 table->mc_reg_table_entry[k].mc_data[j] =
2994 ((temp_reg & 0xffff0000)) |
2995 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2996 }
2997 j++;
2998
2999 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),do { if (!((j < 16))) { printk("\0014" "amdgpu: " "%s\n", "Invalid VramInfo table."
); return -22; } } while (0)
3000 "Invalid VramInfo table.", return -EINVAL)do { if (!((j < 16))) { printk("\0014" "amdgpu: " "%s\n", "Invalid VramInfo table."
); return -22; } } while (0)
;
3001 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0xaab))
;
3002 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS0xaab;
3003 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP0xaa2;
3004 for (k = 0; k < table->num_entries; k++) {
3005 table->mc_reg_table_entry[k].mc_data[j] =
3006 (temp_reg & 0xffff0000) |
3007 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3008
3009 if (!data->is_memory_gddr5)
3010 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3011 }
3012 j++;
3013
3014 if (!data->is_memory_gddr5) {
3015 PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),do { if (!((j < 16))) { printk("\0014" "amdgpu: " "%s\n", "Invalid VramInfo table."
); return -22; } } while (0)
3016 "Invalid VramInfo table.", return -EINVAL)do { if (!((j < 16))) { printk("\0014" "amdgpu: " "%s\n", "Invalid VramInfo table."
); return -22; } } while (0)
;
3017 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD0xa34;
3018 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD0xa34;
3019 for (k = 0; k < table->num_entries; k++)
3020 table->mc_reg_table_entry[k].mc_data[j] =
3021 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3022 j++;
3023 }
3024
3025 break;
3026
3027 case mmMC_SEQ_RESERVE_M0xa82:
3028 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)(((struct cgs_device *)hwmgr->device)->ops->read_register
(hwmgr->device,0xad1))
;
3029 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS10xad1;
3030 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP0xad2;
3031 for (k = 0; k < table->num_entries; k++) {
3032 table->mc_reg_table_entry[k].mc_data[j] =
3033 (temp_reg & 0xffff0000) |
3034 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3035 }
3036 j++;
3037 break;
3038
3039 default:
3040 break;
3041 }
3042
3043 }
3044
3045 table->last = j;
3046
3047 return 0;
3048}
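In tonga_set_mc_special_registers() each MC_SEQ_MISC1 row is expanded into extra EMRS/MRS rows: the high 16 bits of the new value come from the live MC_PMG_CMD_* register, the low 16 bits (or the shifted-down high half) from the per-entry data, non-GDDR5 parts additionally set bit 0x100 in the MRS value, and j tracks the growing register count against SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE. A sketch of the MRS-value construction only (an illustrative helper, not the driver function):

#include <stdint.h>

static uint32_t make_mrs_entry(uint32_t live_mrs_reg, uint32_t misc1_data,
                               int is_gddr5)
{
        uint32_t v = (live_mrs_reg & 0xffff0000u) | (misc1_data & 0x0000ffffu);

        if (!is_gddr5)
                v |= 0x100;   /* non-GDDR5 memory also sets this bit */
        return v;
}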
3049
3050static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
3051{
3052 uint8_t i, j;
3053
3054 for (i = 0; i < table->last; i++) {
3055 for (j = 1; j < table->num_entries; j++) {
3056 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3057 table->mc_reg_table_entry[j].mc_data[i]) {
3058 table->validflag |= (1<<i);
3059 break;
3060 }
3061 }
3062 }
3063
3064 return 0;
3065}
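tonga_set_valid_flag() sets bit i of validflag only when register column i changes somewhere across consecutive mclk entries, so columns that are constant for every entry can be skipped later. A standalone sketch with illustrative array dimensions:

#include <stdint.h>

#define N_REGS    4
#define N_ENTRIES 3

static uint32_t compute_validflag(const uint32_t data[N_ENTRIES][N_REGS])
{
        uint32_t validflag = 0;
        int i, j;

        for (i = 0; i < N_REGS; i++) {
                for (j = 1; j < N_ENTRIES; j++) {
                        if (data[j - 1][i] != data[j][i]) {
                                validflag |= (1u << i);
                                break;
                        }
                }
        }
        return validflag;
}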
3066
3067static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
3068{
3069 int result;
3070 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
3071 pp_atomctrl_mc_reg_table *table;
3072 struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
3073 uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
3074
3075 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL(0x0001 | 0x0004));
3076
3077 if (table == NULL((void *)0))
3078 return -ENOMEM12;
3079
3080 /* Program additional LP registers that are no longer programmed by VBIOS */
3081 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xa9b,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa28))))
3082 cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xa9b,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa28))))
;
3083 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xa9c,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa29))))
3084 cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xa9c,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa29))))
;
3085 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd8f,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd8e))))
3086 cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd8f,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd8e))))
;
3087 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd84,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd83))))
3088 cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd84,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd83))))
;
3089 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd86,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd85))))
3090 cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd86,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd85))))
;
3091 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd82,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd81))))
3092 cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd82,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd81))))
;
3093 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd8d,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd8c))))
3094 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd8d,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd8c))))
;
3095 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd8b,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd8a))))
3096 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xd8b,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xd8a))))
;
3097 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xa9d,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2a))))
3098 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xa9d,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2a))))
;
3099 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xa9e,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2b))))
3100 cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xa9e,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2b))))
;
3101 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xaa1,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa83))))
3102 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xaa1,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa83))))
;
3103 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xaa2,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xaab))))
3104 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xaa2,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xaab))))
;
3105 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xad2,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xad1))))
3106 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xad2,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xad1))))
;
3107 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xa9f,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2f))))
3108 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xa9f,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2f))))
;
3109 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xaa0,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa30))))
3110 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xaa0,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa30))))
;
3111 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xac7,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2d))))
3112 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xac7,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2d))))
;
3113 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xac8,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2e))))
3114 cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xac8,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2e))))
;
3115 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xad3,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2c))))
3116 cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xad3,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xa2c))))
;
3117 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xad8,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xad7))))
3118 cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xad8,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xad7))))
;
3119 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xad6,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xad5))))
3120 cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2))(((struct cgs_device *)hwmgr->device)->ops->write_register
(hwmgr->device,0xad6,(((struct cgs_device *)hwmgr->device
)->ops->read_register(hwmgr->device,0xad5))))
;
3121
3122 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
3123
3124 if (!result)
3125 result = tonga_copy_vbios_smc_reg_table(table, ni_table);
3126
3127 if (!result) {
3128 tonga_set_s0_mc_reg_index(ni_table);
3129 result = tonga_set_mc_special_registers(hwmgr, ni_table);
3130 }
3131
3132 if (!result)
3133 tonga_set_valid_flag(ni_table);
3134
3135 kfree(table);
3136
3137 return result;
3138}
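tonga_initialize_mc_reg_table() first primes every *_LP shadow register from its live counterpart (the "no longer programmed by VBIOS" block), then builds the table via atomctrl_initialize_mc_reg_table() and the helpers above, and frees the temporary pp_atomctrl_mc_reg_table on every path. A generic sketch of the mirror-to-LP copy, with hypothetical read/write helpers standing in for cgs_read_register()/cgs_write_register():

#include <stdint.h>

static uint32_t regs[0x1000];   /* stand-in register file */

static uint32_t read_reg(uint32_t addr)
{
        return regs[addr];
}

static void write_reg(uint32_t addr, uint32_t val)
{
        regs[addr] = val;
}

static void mirror_to_lp(uint32_t lp_addr, uint32_t src_addr)
{
        write_reg(lp_addr, read_reg(src_addr));
}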
3139
3140static bool_Bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
3141{
3142 return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x3f810))) & 0x2000) >>
0xd)
3143 CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,0x3f810))) & 0x2000) >>
0xd)
)
3144 ? true1 : false0;
3145}
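tonga_is_dpm_running() reads FEATURE_STATUS through the SMC indirect register space and tests the VOLTAGE_CONTROLLER_ON field (mask 0x2000, shift 0xd in the expansion above). A standalone equivalent of the bit test:

#include <stdbool.h>
#include <stdint.h>

static bool voltage_controller_on(uint32_t feature_status)
{
        return ((feature_status & 0x2000u) >> 13) == 1;
}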
3146
3147static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
3148 void *profile_setting)
3149{
3150 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3151 struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
3152 (hwmgr->smu_backend);
3153 struct profile_mode_setting *setting;
3154 struct SMU72_Discrete_GraphicsLevel *levels =
3155 smu_data->smc_state_table.GraphicsLevel;
3156 uint32_t array = smu_data->smu7_data.dpm_table_start +
3157 offsetof(SMU72_Discrete_DpmTable, GraphicsLevel)__builtin_offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
3158
3159 uint32_t mclk_array = smu_data->smu7_data.dpm_table_start +
3160 offsetof(SMU72_Discrete_DpmTable, MemoryLevel)__builtin_offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
3161 struct SMU72_Discrete_MemoryLevel *mclk_levels =
3162 smu_data->smc_state_table.MemoryLevel;
3163 uint32_t i;
3164 uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
3165
3166 if (profile_setting == NULL((void *)0))
3167 return -EINVAL22;
3168
3169 setting = (struct profile_mode_setting *)profile_setting;
3170
3171 if (setting->bupdate_sclk) {
3172 if (!data->sclk_dpm_key_disabled)
3173 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel((uint16_t) 0x189), NULL((void *)0));
3174 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
3175 if (levels[i].ActivityLevel !=
3176 cpu_to_be16(setting->sclk_activity)(__uint16_t)(__builtin_constant_p(setting->sclk_activity) ?
(__uint16_t)(((__uint16_t)(setting->sclk_activity) & 0xffU
) << 8 | ((__uint16_t)(setting->sclk_activity) &
0xff00U) >> 8) : __swap16md(setting->sclk_activity)
)
) {
3177 levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity)(__uint16_t)(__builtin_constant_p(setting->sclk_activity) ?
(__uint16_t)(((__uint16_t)(setting->sclk_activity) & 0xffU
) << 8 | ((__uint16_t)(setting->sclk_activity) &
0xff00U) >> 8) : __swap16md(setting->sclk_activity)
)
;
3178
3179 clk_activity_offset = array + (sizeof(SMU72_Discrete_GraphicsLevel) * i)
3180 + offsetof(SMU72_Discrete_GraphicsLevel, ActivityLevel)__builtin_offsetof(SMU72_Discrete_GraphicsLevel, ActivityLevel
)
;
3181 offset = clk_activity_offset & ~0x3;
3182 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset))(__uint32_t)(__builtin_constant_p((((struct cgs_device *)hwmgr
->device)->ops->read_ind_register(hwmgr->device,CGS_IND_REG__SMC
,offset))) ? (__uint32_t)(((__uint32_t)((((struct cgs_device *
)hwmgr->device)->ops->read_ind_register(hwmgr->device
,CGS_IND_REG__SMC,offset))) & 0xff) << 24 | ((__uint32_t
)((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,offset))) & 0xff00) <<
8 | ((__uint32_t)((((struct cgs_device *)hwmgr->device)->
ops->read_ind_register(hwmgr->device,CGS_IND_REG__SMC,offset
))) & 0xff0000) >> 8 | ((__uint32_t)((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,offset))) & 0xff000000) >> 24
) : __swap32md((((struct cgs_device *)hwmgr->device)->ops
->read_ind_register(hwmgr->device,CGS_IND_REG__SMC,offset
))))
;
3183 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
3184 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp))(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,offset,(__uint32_t)(__builtin_constant_p
(tmp) ? (__uint32_t)(((__uint32_t)(tmp) & 0xff) << 24
| ((__uint32_t)(tmp) & 0xff00) << 8 | ((__uint32_t
)(tmp) & 0xff0000) >> 8 | ((__uint32_t)(tmp) & 0xff000000
) >> 24) : __swap32md(tmp))))
;
3185
3186 }
3187 if (levels[i].UpHyst != setting->sclk_up_hyst ||
3188 levels[i].DownHyst != setting->sclk_down_hyst) {
3189 levels[i].UpHyst = setting->sclk_up_hyst;
3190 levels[i].DownHyst = setting->sclk_down_hyst;
3191 up_hyst_offset = array + (sizeof(SMU72_Discrete_GraphicsLevel) * i)
3192 + offsetof(SMU72_Discrete_GraphicsLevel, UpHyst)__builtin_offsetof(SMU72_Discrete_GraphicsLevel, UpHyst);
3193 down_hyst_offset = array + (sizeof(SMU72_Discrete_GraphicsLevel) * i)
3194 + offsetof(SMU72_Discrete_GraphicsLevel, DownHyst)__builtin_offsetof(SMU72_Discrete_GraphicsLevel, DownHyst);
3195 offset = up_hyst_offset & ~0x3;
3196 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset))(__uint32_t)(__builtin_constant_p((((struct cgs_device *)hwmgr
->device)->ops->read_ind_register(hwmgr->device,CGS_IND_REG__SMC
,offset))) ? (__uint32_t)(((__uint32_t)((((struct cgs_device *
)hwmgr->device)->ops->read_ind_register(hwmgr->device
,CGS_IND_REG__SMC,offset))) & 0xff) << 24 | ((__uint32_t
)((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,offset))) & 0xff00) <<
8 | ((__uint32_t)((((struct cgs_device *)hwmgr->device)->
ops->read_ind_register(hwmgr->device,CGS_IND_REG__SMC,offset
))) & 0xff0000) >> 8 | ((__uint32_t)((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,offset))) & 0xff000000) >> 24
) : __swap32md((((struct cgs_device *)hwmgr->device)->ops
->read_ind_register(hwmgr->device,CGS_IND_REG__SMC,offset
))))
;
3197 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpHyst, sizeof(uint8_t));
3198 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownHyst, sizeof(uint8_t));
3199 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp))(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,offset,(__uint32_t)(__builtin_constant_p
(tmp) ? (__uint32_t)(((__uint32_t)(tmp) & 0xff) << 24
| ((__uint32_t)(tmp) & 0xff00) << 8 | ((__uint32_t
)(tmp) & 0xff0000) >> 8 | ((__uint32_t)(tmp) & 0xff000000
) >> 24) : __swap32md(tmp))))
;
3200 }
3201 }
3202 if (!data->sclk_dpm_key_disabled)
3203 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel((uint16_t) 0x18A), NULL((void *)0));
3204 }
3205
3206 if (setting->bupdate_mclk) {
3207 if (!data->mclk_dpm_key_disabled)
3208 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel((uint16_t) 0x18B), NULL((void *)0));
3209 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
3210 if (mclk_levels[i].ActivityLevel !=
3211 cpu_to_be16(setting->mclk_activity)(__uint16_t)(__builtin_constant_p(setting->mclk_activity) ?
(__uint16_t)(((__uint16_t)(setting->mclk_activity) & 0xffU
) << 8 | ((__uint16_t)(setting->mclk_activity) &
0xff00U) >> 8) : __swap16md(setting->mclk_activity)
)
) {
3212 mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity)(__uint16_t)(__builtin_constant_p(setting->mclk_activity) ?
(__uint16_t)(((__uint16_t)(setting->mclk_activity) & 0xffU
) << 8 | ((__uint16_t)(setting->mclk_activity) &
0xff00U) >> 8) : __swap16md(setting->mclk_activity)
)
;
3213
3214 clk_activity_offset = mclk_array + (sizeof(SMU72_Discrete_MemoryLevel) * i)
3215 + offsetof(SMU72_Discrete_MemoryLevel, ActivityLevel)__builtin_offsetof(SMU72_Discrete_MemoryLevel, ActivityLevel);
3216 offset = clk_activity_offset & ~0x3;
3217 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset))(__uint32_t)(__builtin_constant_p((((struct cgs_device *)hwmgr
->device)->ops->read_ind_register(hwmgr->device,CGS_IND_REG__SMC
,offset))) ? (__uint32_t)(((__uint32_t)((((struct cgs_device *
)hwmgr->device)->ops->read_ind_register(hwmgr->device
,CGS_IND_REG__SMC,offset))) & 0xff) << 24 | ((__uint32_t
)((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,offset))) & 0xff00) <<
8 | ((__uint32_t)((((struct cgs_device *)hwmgr->device)->
ops->read_ind_register(hwmgr->device,CGS_IND_REG__SMC,offset
))) & 0xff0000) >> 8 | ((__uint32_t)((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,offset))) & 0xff000000) >> 24
) : __swap32md((((struct cgs_device *)hwmgr->device)->ops
->read_ind_register(hwmgr->device,CGS_IND_REG__SMC,offset
))))
;
3218 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
3219 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp))(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,offset,(__uint32_t)(__builtin_constant_p
(tmp) ? (__uint32_t)(((__uint32_t)(tmp) & 0xff) << 24
| ((__uint32_t)(tmp) & 0xff00) << 8 | ((__uint32_t
)(tmp) & 0xff0000) >> 8 | ((__uint32_t)(tmp) & 0xff000000
) >> 24) : __swap32md(tmp))))
;
3220
3221 }
3222 if (mclk_levels[i].UpHyst != setting->mclk_up_hyst ||
3223 mclk_levels[i].DownHyst != setting->mclk_down_hyst) {
3224 mclk_levels[i].UpHyst = setting->mclk_up_hyst;
3225 mclk_levels[i].DownHyst = setting->mclk_down_hyst;
3226 up_hyst_offset = mclk_array + (sizeof(SMU72_Discrete_MemoryLevel) * i)
3227 + offsetof(SMU72_Discrete_MemoryLevel, UpHyst)__builtin_offsetof(SMU72_Discrete_MemoryLevel, UpHyst);
3228 down_hyst_offset = mclk_array + (sizeof(SMU72_Discrete_MemoryLevel) * i)
3229 + offsetof(SMU72_Discrete_MemoryLevel, DownHyst)__builtin_offsetof(SMU72_Discrete_MemoryLevel, DownHyst);
3230 offset = up_hyst_offset & ~0x3;
3231 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset))(__uint32_t)(__builtin_constant_p((((struct cgs_device *)hwmgr
->device)->ops->read_ind_register(hwmgr->device,CGS_IND_REG__SMC
,offset))) ? (__uint32_t)(((__uint32_t)((((struct cgs_device *
)hwmgr->device)->ops->read_ind_register(hwmgr->device
,CGS_IND_REG__SMC,offset))) & 0xff) << 24 | ((__uint32_t
)((((struct cgs_device *)hwmgr->device)->ops->read_ind_register
(hwmgr->device,CGS_IND_REG__SMC,offset))) & 0xff00) <<
8 | ((__uint32_t)((((struct cgs_device *)hwmgr->device)->
ops->read_ind_register(hwmgr->device,CGS_IND_REG__SMC,offset
))) & 0xff0000) >> 8 | ((__uint32_t)((((struct cgs_device
*)hwmgr->device)->ops->read_ind_register(hwmgr->
device,CGS_IND_REG__SMC,offset))) & 0xff000000) >> 24
) : __swap32md((((struct cgs_device *)hwmgr->device)->ops
->read_ind_register(hwmgr->device,CGS_IND_REG__SMC,offset
))))
;
3232 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpHyst, sizeof(uint8_t));
3233 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownHyst, sizeof(uint8_t));
3234 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp))(((struct cgs_device *)hwmgr->device)->ops->write_ind_register
(hwmgr->device,CGS_IND_REG__SMC,offset,(__uint32_t)(__builtin_constant_p
(tmp) ? (__uint32_t)(((__uint32_t)(tmp) & 0xff) << 24
| ((__uint32_t)(tmp) & 0xff00) << 8 | ((__uint32_t
)(tmp) & 0xff0000) >> 8 | ((__uint32_t)(tmp) & 0xff000000
) >> 24) : __swap32md(tmp))))
;
3235 }
3236 }
3237 if (!data->mclk_dpm_key_disabled)
3238 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel((uint16_t) 0x18C), NULL((void *)0));
3239 }
3240 return 0;
3241}
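tonga_update_dpm_settings() patches 16-bit ActivityLevel and 8-bit UpHyst/DownHyst fields in place: each field offset is aligned down to a dword with offset & ~0x3, the dword is read and byte-swapped via PP_HOST_TO_SMC_UL, phm_set_field_to_u32() splices the new value in, and the result is swapped back and written. The following is a generic sketch of splicing a small field into a dword by byte offset; it is an assumption about how such a splice can be done (little-endian host, as on amd64), not the actual phm_set_field_to_u32() implementation.

#include <stdint.h>
#include <string.h>

static uint32_t splice_field(uint32_t field_offset, uint32_t dword,
                             uint32_t value, size_t field_size)
{
        unsigned int byte = field_offset & 0x3;   /* position inside the dword */
        uint8_t bytes[4];

        if (byte + field_size > sizeof(bytes))
                return dword;                     /* field must not straddle the dword */

        memcpy(bytes, &dword, sizeof(bytes));
        memcpy(bytes + byte, &value, field_size); /* overwrite just the field bytes */
        memcpy(&dword, bytes, sizeof(bytes));
        return dword;
}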
3242
3243const struct pp_smumgr_func tonga_smu_funcs = {
3244 .name = "tonga_smu",
3245 .smu_init = &tonga_smu_init,
3246 .smu_fini = &smu7_smu_fini,
3247 .start_smu = &tonga_start_smu,
3248 .check_fw_load_finish = &smu7_check_fw_load_finish,
3249 .request_smu_load_fw = &smu7_request_smu_load_fw,
3250 .request_smu_load_specific_fw = NULL((void *)0),
3251 .send_msg_to_smc = &smu7_send_msg_to_smc,
3252 .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
3253 .get_argument = smu7_get_argument,
3254 .download_pptable_settings = NULL((void *)0),
3255 .upload_pptable_settings = NULL((void *)0),
3256 .update_smc_table = tonga_update_smc_table,
3257 .get_offsetof = tonga_get_offsetof,
3258 .process_firmware_header = tonga_process_firmware_header,
3259 .init_smc_table = tonga_init_smc_table,
3260 .update_sclk_threshold = tonga_update_sclk_threshold,
3261 .thermal_setup_fan_table = tonga_thermal_setup_fan_table,
3262 .populate_all_graphic_levels = tonga_populate_all_graphic_levels,
3263 .populate_all_memory_levels = tonga_populate_all_memory_levels,
3264 .get_mac_definition = tonga_get_mac_definition,
3265 .initialize_mc_reg_table = tonga_initialize_mc_reg_table,
3266 .is_dpm_running = tonga_is_dpm_running,
3267 .update_dpm_settings = tonga_update_dpm_settings,
3268};