File: | dev/pci/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c |
Warning: | line 674, column 11: Value stored to 'HiSidd' during its initialization is never read |
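Note on the reported defect: at line 674, 'HiSidd' is initialized from smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd, but that value is unconditionally overwritten at line 678 before it is ever read ('LoSidd' at line 675 follows the same pattern), so the initializer is a dead store. A minimal sketch of one possible cleanup follows; it is an illustration only, assuming the initial values have no other use, and is not necessarily the fix applied upstream:

    /* Sketch only: declare without the dead initializers, since both values
     * are recomputed from cac_table before their first use. */
    uint16_t HiSidd, LoSidd;
    struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;

    HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
    LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);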
1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #include "pp_debug.h" |
25 | #include "smumgr.h" |
26 | #include "smu7_dyn_defaults.h" |
27 | #include "smu73.h" |
28 | #include "smu_ucode_xfer_vi.h" |
29 | #include "fiji_smumgr.h" |
30 | #include "fiji_ppsmc.h" |
31 | #include "smu73_discrete.h" |
32 | #include "ppatomctrl.h" |
33 | #include "smu/smu_7_1_3_d.h" |
34 | #include "smu/smu_7_1_3_sh_mask.h" |
35 | #include "gmc/gmc_8_1_d.h" |
36 | #include "gmc/gmc_8_1_sh_mask.h" |
37 | #include "oss/oss_3_0_d.h" |
38 | #include "gca/gfx_8_0_d.h" |
39 | #include "bif/bif_5_0_d.h" |
40 | #include "bif/bif_5_0_sh_mask.h" |
41 | #include "dce/dce_10_0_d.h" |
42 | #include "dce/dce_10_0_sh_mask.h" |
43 | #include "hardwaremanager.h" |
44 | #include "cgs_common.h" |
45 | #include "atombios.h" |
46 | #include "pppcielanes.h" |
47 | #include "hwmgr.h" |
48 | #include "smu7_hwmgr.h" |
49 | |
50 | |
51 | #define AVFS_EN_MSB 1568 |
52 | #define AVFS_EN_LSB 1568 |
53 | |
54 | #define FIJI_SMC_SIZE 0x20000 |
55 | |
56 | #define POWERTUNE_DEFAULT_SET_MAX 1 |
57 | #define VDDC_VDDCI_DELTA 300 |
58 | #define MC_CG_ARB_FREQ_F1 0x0b |
59 | |
60 | /* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs |
61 | * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] |
62 | */ |
63 | static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = { |
64 | {600, 1050, 3, 0}, {600, 1050, 6, 1} }; |
65 | |
66 | /* [FF, SS] type, [] 4 voltage ranges, and |
67 | * [Floor Freq, Boundary Freq, VID min , VID max] |
68 | */ |
69 | static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = { |
70 | { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, |
71 | { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; |
72 | |
73 | /* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] |
74 | * (coming from PWR_CKS_CNTL.stretch_amount reg spec) |
75 | */ |
76 | static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = { |
77 | {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} }; |
78 | |
79 | static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { |
80 | /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */ |
81 | {1, 0xF, 0xFD, |
82 | /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */ |
83 | 0x19, 5, 45} |
84 | }; |
85 | |
86 | static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = { |
87 | /* Min Sclk pcie DeepSleep Activity CgSpll CgSpll spllSpread SpllSpread CcPwr CcPwr Sclk Display Enabled Enabled Voltage Power */ |
88 | /* Voltage, Frequency, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, Spectrum, Spectrum2, DynRm, DynRm1 Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ |
89 | { 0x3c0fd047, 0x30750000, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0x21680000, 0x0c000000, 0, 0, 0x16, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, |
90 | { 0xa00fd047, 0x409c0000, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0x21680000, 0x11000000, 0, 0, 0x16, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, |
91 | { 0x0410d047, 0x50c30000, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0x21680000, 0x0d000000, 0, 0, 0x0e, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, |
92 | { 0x6810d047, 0x60ea0000, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0x21680000, 0x0e000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, |
93 | { 0xcc10d047, 0xe8fd0000, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0x21680000, 0x0f000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, |
94 | { 0x3011d047, 0x70110100, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0x21680000, 0x10000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, |
95 | { 0x9411d047, 0xf8240100, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0x21680000, 0x11000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, |
96 | { 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 } |
97 | }; |
98 | |
99 | static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) |
100 | { |
101 | int result = 0; |
102 | |
103 | /* Wait for smc boot up */ |
104 | /* PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, |
105 | RCU_UC_EVENTS, boot_seq_done, 0); */ |
106 | |
107 | PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
108 | SMC_SYSCON_RESET_CNTL, rst_reg, 1); |
109 | |
110 | result = smu7_upload_smu_firmware_image(hwmgr); |
111 | if (result) |
112 | return result; |
113 | |
114 | /* Clear status */ |
115 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
116 | ixSMU_STATUS, 0); |
117 | |
118 | PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
119 | SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); |
120 | |
121 | /* De-assert reset */ |
122 | PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
123 | SMC_SYSCON_RESET_CNTL, rst_reg, 0); |
124 | |
125 | /* Wait for ROM firmware to initialize interrupt handler */ |
126 | /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, SMC_IND, |
127 | SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */ |
128 | |
129 | /* Set SMU Auto Start */ |
130 | PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
131 | SMU_INPUT_DATA, AUTO_START, 1); |
132 | |
133 | /* Clear firmware interrupt enable flag */ |
134 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
135 | ixFIRMWARE_FLAGS, 0); |
136 | |
137 | PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, |
138 | INTERRUPTS_ENABLED, 1); |
139 | |
140 | smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL); |
141 | |
142 | /* Wait for done bit to be set */ |
143 | PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, |
144 | SMU_STATUS, SMU_DONE, 0); |
145 | |
146 | /* Check pass/failed indicator */ |
147 | if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
148 | SMU_STATUS, SMU_PASS) != 1) { |
149 | PP_ASSERT_WITH_CODE(false, |
150 | "SMU Firmware start failed!", return -1); |
151 | } |
152 | |
153 | /* Wait for firmware to initialize */ |
154 | PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, |
155 | FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); |
156 | |
157 | return result; |
158 | } |
159 | |
160 | static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) |
161 | { |
162 | int result = 0; |
163 | |
164 | /* wait for smc boot up */ |
165 | PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, |
166 | RCU_UC_EVENTS, boot_seq_done, 0); |
167 | |
168 | /* Clear firmware interrupt enable flag */ |
169 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
170 | ixFIRMWARE_FLAGS, 0); |
171 | |
172 | /* Assert reset */ |
173 | PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
174 | SMC_SYSCON_RESET_CNTL, rst_reg, 1); |
175 | |
176 | result = smu7_upload_smu_firmware_image(hwmgr); |
177 | if (result) |
178 | return result; |
179 | |
180 | /* Set SMC instruction start point at 0x0 */ |
181 | smu7_program_jump_on_start(hwmgr); |
182 | |
183 | /* Enable clock */ |
184 | PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
185 | SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); |
186 | |
187 | /* De-assert reset */ |
188 | PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
189 | SMC_SYSCON_RESET_CNTL, rst_reg, 0); |
190 | |
191 | /* Wait for firmware to initialize */ |
192 | PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, |
193 | FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); |
194 | |
195 | return result; |
196 | } |
197 | |
198 | static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr) |
199 | { |
200 | int result = 0; |
201 | struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); |
202 | |
203 | if (0 != smu_data->avfs_btc_param) { |
204 | if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, |
205 | PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param, |
206 | NULL)) { |
207 | pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed"); |
208 | result = -EINVAL; |
209 | } |
210 | } |
211 | /* Soft-Reset to reset the engine before loading uCode */ |
212 | /* halt */ |
213 | cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000); |
214 | /* reset everything */ |
215 | cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff); |
216 | /* clear reset */ |
217 | cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0); |
218 | |
219 | return result; |
220 | } |
221 | |
222 | static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr) |
223 | { |
224 | int32_t vr_config; |
225 | uint32_t table_start; |
226 | uint32_t level_addr, vr_config_addr; |
227 | uint32_t level_size = sizeof(avfs_graphics_level); |
228 | |
229 | PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr, |
230 | SMU7_FIRMWARE_HEADER_LOCATION + |
231 | offsetof(SMU73_Firmware_Header, DpmTable), |
232 | &table_start, 0x40000), |
233 | "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not " |
234 | "communicate starting address of DPM table", |
235 | return -1;); |
236 | |
237 | /* Default value for vr_config = |
238 | * VR_MERGED_WITH_VDDC + VR_STATIC_VOLTAGE(VDDCI) */ |
239 | vr_config = 0x01000500; /* Real value:0x50001 */ |
240 | |
241 | vr_config_addr = table_start + |
242 | offsetof(SMU73_Discrete_DpmTable, VRConfig); |
243 | |
244 | PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_addr, |
245 | (uint8_t *)&vr_config, sizeof(int32_t), 0x40000), |
246 | "[AVFS][Fiji_SetupGfxLvlStruct] Problems copying " |
247 | "vr_config value over to SMC", |
248 | return -1;); |
249 | |
250 | level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); |
251 | |
252 | PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, level_addr, |
253 | (uint8_t *)(&avfs_graphics_level), level_size, 0x40000), |
254 | "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!", |
255 | return -1;); |
256 | |
257 | return 0; |
258 | } |
259 | |
260 | static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr) |
261 | { |
262 | if (!hwmgr->avfs_supported) |
263 | return 0; |
264 | |
265 | PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr), |
266 | "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level" |
267 | " table over to SMU", |
268 | return -EINVAL); |
269 | PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr), |
270 | "[AVFS][fiji_avfs_event_mgr] Could not setup " |
271 | "Pwr Virus for AVFS ", |
272 | return -EINVAL); |
273 | PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr), |
274 | "[AVFS][fiji_avfs_event_mgr] Failure at " |
275 | "fiji_start_avfs_btc. AVFS Disabled", |
276 | return -EINVAL); |
277 | |
278 | return 0; |
279 | } |
280 | |
281 | static int fiji_start_smu(struct pp_hwmgr *hwmgr) |
282 | { |
283 | int result = 0; |
284 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend); |
285 | |
286 | /* Only start SMC if SMC RAM is not running */ |
287 | if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) { |
288 | /* Check if SMU is running in protected mode */ |
289 | if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, |
290 | CGS_IND_REG__SMC, |
291 | SMU_FIRMWARE, SMU_MODE)) { |
292 | result = fiji_start_smu_in_non_protection_mode(hwmgr); |
293 | if (result) |
294 | return result; |
295 | } else { |
296 | result = fiji_start_smu_in_protection_mode(hwmgr); |
297 | if (result) |
298 | return result; |
299 | } |
300 | if (fiji_avfs_event_mgr(hwmgr)) |
301 | hwmgr->avfs_supported = false; |
302 | } |
303 | |
304 | /* Setup SoftRegsStart here for register lookup in case |
305 | * DummyBackEnd is used and ProcessFirmwareHeader is not executed |
306 | */ |
307 | smu7_read_smc_sram_dword(hwmgr, |
308 | SMU7_FIRMWARE_HEADER_LOCATION + |
309 | offsetof(SMU73_Firmware_Header, SoftRegisters), |
310 | &(priv->smu7_data.soft_regs_start), 0x40000); |
311 | |
312 | result = smu7_request_smu_load_fw(hwmgr); |
313 | |
314 | return result; |
315 | } |
316 | |
317 | static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr) |
318 | { |
319 | |
320 | uint32_t efuse = 0; |
321 | |
322 | if (!hwmgr->not_vf) |
323 | return false; |
324 | |
325 | if (!atomctrl_read_efuse(hwmgr, AVFS_EN_LSB, AVFS_EN_MSB, |
326 | &efuse)) { |
327 | if (efuse) |
328 | return true; |
329 | } |
330 | return false; |
331 | } |
332 | |
333 | static int fiji_smu_init(struct pp_hwmgr *hwmgr) |
334 | { |
335 | struct fiji_smumgr *fiji_priv = NULL; |
336 | |
337 | fiji_priv = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL); |
338 | |
339 | if (fiji_priv == NULL) |
340 | return -ENOMEM; |
341 | |
342 | hwmgr->smu_backend = fiji_priv; |
343 | |
344 | if (smu7_init(hwmgr)) { |
345 | kfree(fiji_priv); |
346 | return -EINVAL; |
347 | } |
348 | |
349 | return 0; |
350 | } |
351 | |
352 | static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, |
353 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_table, |
354 | uint32_t clock, uint32_t *voltage, uint32_t *mvdd) |
355 | { |
356 | uint32_t i; |
357 | uint16_t vddci; |
358 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
359 | *voltage = *mvdd = 0; |
360 | |
361 | |
362 | /* clock - voltage dependency table is empty table */ |
363 | if (dep_table->count == 0) |
364 | return -EINVAL; |
365 | |
366 | for (i = 0; i < dep_table->count; i++) { |
367 | /* find first sclk bigger than request */ |
368 | if (dep_table->entries[i].clk >= clock) { |
369 | *voltage |= (dep_table->entries[i].vddc * |
370 | VOLTAGE_SCALE) << VDDC_SHIFT; |
371 | if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) |
372 | *voltage |= (data->vbios_boot_state.vddci_bootup_value * |
373 | VOLTAGE_SCALE) << VDDCI_SHIFT; |
374 | else if (dep_table->entries[i].vddci) |
375 | *voltage |= (dep_table->entries[i].vddci * |
376 | VOLTAGE_SCALE) << VDDCI_SHIFT; |
377 | else { |
378 | vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), |
379 | (dep_table->entries[i].vddc - |
380 | VDDC_VDDCI_DELTA)); |
381 | *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; |
382 | } |
383 | |
384 | if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) |
385 | *mvdd = data->vbios_boot_state.mvdd_bootup_value * |
386 | VOLTAGE_SCALE; |
387 | else if (dep_table->entries[i].mvdd) |
388 | *mvdd = (uint32_t) dep_table->entries[i].mvdd * |
389 | VOLTAGE_SCALE; |
390 | |
391 | *voltage |= 1 << PHASES_SHIFT; |
392 | return 0; |
393 | } |
394 | } |
395 | |
396 | /* sclk is bigger than max sclk in the dependence table */ |
397 | *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; |
398 | |
399 | if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) |
400 | *voltage |= (data->vbios_boot_state.vddci_bootup_value * |
401 | VOLTAGE_SCALE) << VDDCI_SHIFT; |
402 | else if (dep_table->entries[i-1].vddci) { |
403 | vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), |
404 | (dep_table->entries[i].vddc - |
405 | VDDC_VDDCI_DELTA)); |
406 | *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; |
407 | } |
408 | |
409 | if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) |
410 | *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; |
411 | else if (dep_table->entries[i].mvdd) |
412 | *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; |
413 | |
414 | return 0; |
415 | } |
416 | |
417 | |
418 | static uint16_t scale_fan_gain_settings(uint16_t raw_setting) |
419 | { |
420 | uint32_t tmp; |
421 | tmp = raw_setting * 4096 / 100; |
422 | return (uint16_t)tmp; |
423 | } |
424 | |
425 | static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda) |
426 | { |
427 | switch (line) { |
428 | case SMU7_I2CLineID_DDC1: |
429 | *scl = SMU7_I2C_DDC1CLK; |
430 | *sda = SMU7_I2C_DDC1DATA; |
431 | break; |
432 | case SMU7_I2CLineID_DDC2: |
433 | *scl = SMU7_I2C_DDC2CLK; |
434 | *sda = SMU7_I2C_DDC2DATA; |
435 | break; |
436 | case SMU7_I2CLineID_DDC3: |
437 | *scl = SMU7_I2C_DDC3CLK; |
438 | *sda = SMU7_I2C_DDC3DATA; |
439 | break; |
440 | case SMU7_I2CLineID_DDC4: |
441 | *scl = SMU7_I2C_DDC4CLK; |
442 | *sda = SMU7_I2C_DDC4DATA; |
443 | break; |
444 | case SMU7_I2CLineID_DDC5: |
445 | *scl = SMU7_I2C_DDC5CLK; |
446 | *sda = SMU7_I2C_DDC5DATA; |
447 | break; |
448 | case SMU7_I2CLineID_DDC6: |
449 | *scl = SMU7_I2C_DDC6CLK; |
450 | *sda = SMU7_I2C_DDC6DATA; |
451 | break; |
452 | case SMU7_I2CLineID_SCLSDA: |
453 | *scl = SMU7_I2C_SCL; |
454 | *sda = SMU7_I2C_SDA; |
455 | break; |
456 | case SMU7_I2CLineID_DDCVGA: |
457 | *scl = SMU7_I2C_DDCVGACLK; |
458 | *sda = SMU7_I2C_DDCVGADATA; |
459 | break; |
460 | default: |
461 | *scl = 0; |
462 | *sda = 0; |
463 | break; |
464 | } |
465 | } |
466 | |
467 | static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) |
468 | { |
469 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
470 | struct phm_ppt_v1_information *table_info = |
471 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
472 | |
473 | if (table_info && |
474 | table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && |
475 | table_info->cac_dtp_table->usPowerTuneDataSetID) |
476 | smu_data->power_tune_defaults = |
477 | &fiji_power_tune_data_set_array |
478 | [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; |
479 | else |
480 | smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0]; |
481 | |
482 | } |
483 | |
484 | static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) |
485 | { |
486 | |
487 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
488 | const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; |
489 | |
490 | SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); |
491 | |
492 | struct phm_ppt_v1_information *table_info = |
493 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
494 | struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; |
495 | struct pp_advance_fan_control_parameters *fan_table = |
496 | &hwmgr->thermal_controller.advanceFanControlParameters; |
497 | uint8_t uc_scl, uc_sda; |
498 | |
499 | /* TDP number of fraction bits are changed from 8 to 7 for Fiji |
500 | * as requested by SMC team |
501 | */ |
502 | dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( |
503 | (uint16_t)(cac_dtp_table->usTDP * 128)); |
504 | dpm_table->TargetTdp = PP_HOST_TO_SMC_US( |
505 | (uint16_t)(cac_dtp_table->usTDP * 128)); |
506 | |
507 | PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, |
508 | "Target Operating Temp is out of Range!", |
509 | ); |
510 | |
511 | dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); |
512 | dpm_table->GpuTjHyst = 8; |
513 | |
514 | dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase; |
515 | |
516 | /* The following are for new Fiji Multi-input fan/thermal control */ |
517 | dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( |
518 | cac_dtp_table->usTargetOperatingTemp * 256); |
519 | dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( |
520 | cac_dtp_table->usTemperatureLimitHotspot * 256); |
521 | dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US( |
522 | cac_dtp_table->usTemperatureLimitLiquid1 * 256); |
523 | dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US( |
524 | cac_dtp_table->usTemperatureLimitLiquid2 * 256); |
525 | dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US( |
526 | cac_dtp_table->usTemperatureLimitVrVddc * 256); |
527 | dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US( |
528 | cac_dtp_table->usTemperatureLimitVrMvdd * 256); |
529 | dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US( |
530 | cac_dtp_table->usTemperatureLimitPlx * 256); |
531 | |
532 | dpm_table->FanGainEdge = PP_HOST_TO_SMC_US( |
533 | scale_fan_gain_settings(fan_table->usFanGainEdge)); |
534 | dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US( |
535 | scale_fan_gain_settings(fan_table->usFanGainHotspot)); |
536 | dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US( |
537 | scale_fan_gain_settings(fan_table->usFanGainLiquid)); |
538 | dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US( |
539 | scale_fan_gain_settings(fan_table->usFanGainVrVddc)); |
540 | dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US( |
541 | scale_fan_gain_settings(fan_table->usFanGainVrMvdd)); |
542 | dpm_table->FanGainPlx = PP_HOST_TO_SMC_US( |
543 | scale_fan_gain_settings(fan_table->usFanGainPlx)); |
544 | dpm_table->FanGainHbm = PP_HOST_TO_SMC_US( |
545 | scale_fan_gain_settings(fan_table->usFanGainHbm)); |
546 | |
547 | dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address; |
548 | dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address; |
549 | dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address; |
550 | dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address; |
551 | |
552 | get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda); |
553 | dpm_table->Liquid_I2C_LineSCL = uc_scl; |
554 | dpm_table->Liquid_I2C_LineSDA = uc_sda; |
555 | |
556 | get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda); |
557 | dpm_table->Vr_I2C_LineSCL = uc_scl; |
558 | dpm_table->Vr_I2C_LineSDA = uc_sda; |
559 | |
560 | get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda); |
561 | dpm_table->Plx_I2C_LineSCL = uc_scl; |
562 | dpm_table->Plx_I2C_LineSDA = uc_sda; |
563 | |
564 | return 0; |
565 | } |
566 | |
567 | |
568 | static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) |
569 | { |
570 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
571 | const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; |
572 | |
573 | smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; |
574 | smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; |
575 | smu_data->power_tune_table.SviLoadLineTrimVddC = 3; |
576 | smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; |
577 | |
578 | return 0; |
579 | } |
580 | |
581 | |
582 | static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) |
583 | { |
584 | uint16_t tdc_limit; |
585 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
586 | struct phm_ppt_v1_information *table_info = |
587 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
588 | const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; |
589 | |
590 | /* TDC number of fraction bits are changed from 8 to 7 |
591 | * for Fiji as requested by SMC team |
592 | */ |
593 | tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); |
594 | smu_data->power_tune_table.TDC_VDDC_PkgLimit = |
595 | CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); |
596 | smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = |
597 | defaults->TDC_VDDC_ThrottleReleaseLimitPerc; |
598 | smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; |
599 | |
600 | return 0; |
601 | } |
602 | |
603 | static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) |
604 | { |
605 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
606 | const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; |
607 | uint32_t temp; |
608 | |
609 | if (smu7_read_smc_sram_dword(hwmgr, |
610 | fuse_table_offset + |
611 | offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), |
612 | (uint32_t *)&temp, SMC_RAM_END)) |
613 | PP_ASSERT_WITH_CODE(false, |
614 | "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", |
615 | return -EINVAL); |
616 | else { |
617 | smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; |
618 | smu_data->power_tune_table.LPMLTemperatureMin = |
619 | (uint8_t)((temp >> 16) & 0xff); |
620 | smu_data->power_tune_table.LPMLTemperatureMax = |
621 | (uint8_t)((temp >> 8) & 0xff); |
622 | smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); |
623 | } |
624 | return 0; |
625 | } |
626 | |
627 | static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) |
628 | { |
629 | int i; |
630 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
631 | |
632 | /* Currently not used. Set all to zero. */ |
633 | for (i = 0; i < 16; i++) |
634 | smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0; |
635 | |
636 | return 0; |
637 | } |
638 | |
639 | static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) |
640 | { |
641 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
642 | |
643 | if ((hwmgr->thermal_controller.advanceFanControlParameters. |
644 | usFanOutputSensitivity & (1 << 15)) || |
645 | 0 == hwmgr->thermal_controller.advanceFanControlParameters. |
646 | usFanOutputSensitivity) |
647 | hwmgr->thermal_controller.advanceFanControlParameters. |
648 | usFanOutputSensitivity = hwmgr->thermal_controller. |
649 | advanceFanControlParameters.usDefaultFanOutputSensitivity; |
650 | |
651 | smu_data->power_tune_table.FuzzyFan_PwmSetDelta = |
652 | PP_HOST_TO_SMC_US(hwmgr->thermal_controller. |
653 | advanceFanControlParameters.usFanOutputSensitivity); |
654 | return 0; |
655 | } |
656 | |
657 | static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) |
658 | { |
659 | int i; |
660 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
661 | |
662 | /* Currently not used. Set all to zero. */ |
663 | for (i = 0; i < 16; i++) |
664 | smu_data->power_tune_table.GnbLPML[i] = 0; |
665 | |
666 | return 0; |
667 | } |
668 | |
669 | static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) |
670 | { |
671 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
672 | struct phm_ppt_v1_information *table_info = |
673 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
674 | uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; |
Value stored to 'HiSidd' during its initialization is never read | |
675 | uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; |
676 | struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; |
677 | |
678 | HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); |
679 | LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); |
680 | |
681 | smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = |
682 | CONVERT_FROM_HOST_TO_SMC_US(HiSidd); |
683 | smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = |
684 | CONVERT_FROM_HOST_TO_SMC_US(LoSidd); |
685 | |
686 | return 0; |
687 | } |
688 | |
689 | static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) |
690 | { |
691 | uint32_t pm_fuse_table_offset; |
692 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
693 | |
694 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
695 | PHM_PlatformCaps_PowerContainment)) { |
696 | if (smu7_read_smc_sram_dword(hwmgr, |
697 | SMU7_FIRMWARE_HEADER_LOCATION + |
698 | offsetof(SMU73_Firmware_Header, PmFuseTable), |
699 | &pm_fuse_table_offset, SMC_RAM_END)) |
700 | PP_ASSERT_WITH_CODE(false, |
701 | "Attempt to get pm_fuse_table_offset Failed!", |
702 | return -EINVAL); |
703 | |
704 | /* DW6 */ |
705 | if (fiji_populate_svi_load_line(hwmgr)) |
706 | PP_ASSERT_WITH_CODE(false, |
707 | "Attempt to populate SviLoadLine Failed!", |
708 | return -EINVAL); |
709 | /* DW7 */ |
710 | if (fiji_populate_tdc_limit(hwmgr)) |
711 | PP_ASSERT_WITH_CODE(false, |
712 | "Attempt to populate TDCLimit Failed!", return -EINVAL); |
713 | /* DW8 */ |
714 | if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset)) |
715 | PP_ASSERT_WITH_CODE(false, |
716 | "Attempt to populate TdcWaterfallCtl, " |
717 | "LPMLTemperature Min and Max Failed!", |
718 | return -EINVAL); |
719 | |
720 | /* DW9-DW12 */ |
721 | if (0 != fiji_populate_temperature_scaler(hwmgr)) |
722 | PP_ASSERT_WITH_CODE(false, |
723 | "Attempt to populate LPMLTemperatureScaler Failed!", |
724 | return -EINVAL); |
725 | |
726 | /* DW13-DW14 */ |
727 | if (fiji_populate_fuzzy_fan(hwmgr)) |
728 | PP_ASSERT_WITH_CODE(false, |
729 | "Attempt to populate Fuzzy Fan Control parameters Failed!", |
730 | return -EINVAL); |
731 | |
732 | /* DW15-DW18 */ |
733 | if (fiji_populate_gnb_lpml(hwmgr)) |
734 | PP_ASSERT_WITH_CODE(false,
735 | "Attempt to populate GnbLPML Failed!",
736 | return -EINVAL);
737 | |
738 | /* DW20 */ |
739 | if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr)) |
740 | PP_ASSERT_WITH_CODE(false,
741 | "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
742 | "Sidd Failed!", return -EINVAL);
743 | |
744 | if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, |
745 | (uint8_t *)&smu_data->power_tune_table, |
746 | sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
747 | PP_ASSERT_WITH_CODE(false,
748 | "Attempt to download PmFuseTable Failed!",
749 | return -EINVAL);
750 | } |
751 | return 0; |
752 | } |
753 | |
754 | static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, |
755 | struct SMU73_Discrete_DpmTable *table) |
756 | { |
757 | uint32_t count; |
758 | uint8_t index; |
759 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
760 | struct phm_ppt_v1_information *table_info = |
761 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
762 | struct phm_ppt_v1_voltage_lookup_table *lookup_table = |
763 | table_info->vddc_lookup_table; |
764 | /* The lookup table is already byte-swapped, so in order to use a value
765 | * from it we need to swap it back.
766 | * We populate the vddc CAC data into the BapmVddc table
767 | * for both split and merged mode.
768 | */
769 | |
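/* Note (annotation, not original source): convert_to_vid() appears to map each
 * CAC leakage voltage (in mV) from the lookup table to an 8-bit SVI2 VID code;
 * the exact scaling lives in the shared smu7 headers, so this is only a summary. */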
770 | for (count = 0; count < lookup_table->count; count++) { |
771 | index = phm_get_voltage_index(lookup_table, |
772 | data->vddc_voltage_table.entries[count].value); |
773 | table->BapmVddcVidLoSidd[count] = |
774 | convert_to_vid(lookup_table->entries[index].us_cac_low); |
775 | table->BapmVddcVidHiSidd[count] = |
776 | convert_to_vid(lookup_table->entries[index].us_cac_high); |
777 | } |
778 | |
779 | return 0; |
780 | } |
781 | |
782 | static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, |
783 | struct SMU73_Discrete_DpmTable *table) |
784 | { |
785 | int result; |
786 | |
787 | result = fiji_populate_cac_table(hwmgr, table); |
788 | PP_ASSERT_WITH_CODE(0 == result,
789 | "can not populate CAC voltage tables to SMC",
790 | return -EINVAL);
791 | |
792 | return 0; |
793 | } |
794 | |
795 | static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr, |
796 | struct SMU73_Discrete_Ulv *state) |
797 | { |
798 | int result = 0; |
799 | |
800 | struct phm_ppt_v1_information *table_info = |
801 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
802 | |
803 | state->CcPwrDynRm = 0; |
804 | state->CcPwrDynRm1 = 0; |
805 | |
806 | state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; |
807 | state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * |
808 | VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
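/* In this tree VOLTAGE_VID_OFFSET_SCALE2 / SCALE1 are 100 / 625, so the ULV
 * voltage offset in mV is effectively divided by 6.25 mV per VID step. */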
809 | |
810 | state->VddcPhase = 1; |
811 | |
812 | if (!result) { |
813 | CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
814 | CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
815 | CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
816 | } |
817 | return result; |
818 | } |
819 | |
820 | static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr, |
821 | struct SMU73_Discrete_DpmTable *table) |
822 | { |
823 | return fiji_populate_ulv_level(hwmgr, &table->Ulv); |
824 | } |
825 | |
826 | static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr, |
827 | struct SMU73_Discrete_DpmTable *table) |
828 | { |
829 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
830 | struct smu7_dpm_table *dpm_table = &data->dpm_table; |
831 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
832 | int i; |
833 | |
834 | /* Index (dpm_table->pcie_speed_table.count) |
835 | * is reserved for PCIE boot level. */ |
836 | for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { |
837 | table->LinkLevel[i].PcieGenSpeed = |
838 | (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; |
839 | table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( |
840 | dpm_table->pcie_speed_table.dpm_levels[i].param1); |
841 | table->LinkLevel[i].EnabledForActivity = 1; |
842 | table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); |
843 | table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
844 | table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
845 | } |
846 | |
847 | smu_data->smc_state_table.LinkLevelCount = |
848 | (uint8_t)dpm_table->pcie_speed_table.count; |
849 | data->dpm_level_enable_mask.pcie_dpm_enable_mask = |
850 | phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); |
851 | |
852 | return 0; |
853 | } |
854 | |
855 | static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr, |
856 | uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk) |
857 | { |
858 | const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
859 | struct pp_atomctrl_clock_dividers_vi dividers; |
860 | uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; |
861 | uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; |
862 | uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; |
863 | uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; |
864 | uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; |
865 | uint32_t ref_clock; |
866 | uint32_t ref_divider; |
867 | uint32_t fbdiv; |
868 | int result; |
869 | |
870 | /* get the engine clock dividers for this clock value */ |
871 | result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
872 | |
873 | PP_ASSERT_WITH_CODE(result == 0,
874 | "Error retrieving Engine Clock dividers from VBIOS.",
875 | return result);
876 | |
877 | /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */ |
878 | ref_clock = atomctrl_get_reference_clock(hwmgr); |
879 | ref_divider = 1 + dividers.uc_pll_ref_div; |
880 | |
881 | /* low 14 bits is fraction and high 12 bits is divider */ |
882 | fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; |
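/* Per the comments above, fbdiv is a 26-bit 12.14 fixed-point feedback divider:
 * roughly VCO = ref_clock / ref_divider * fbdiv / 16384. */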
883 | |
884 | /* SPLL_FUNC_CNTL setup */ |
885 | spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
886 | SPLL_REF_DIV, dividers.uc_pll_ref_div);
887 | spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
888 | SPLL_PDIV_A, dividers.uc_pll_post_div);
889 | |
890 | /* SPLL_FUNC_CNTL_3 setup */
891 | spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
892 | SPLL_FB_DIV, fbdiv);
893 | |
894 | /* set to use fractional accumulation */
895 | spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
896 | SPLL_DITHEN, 1);
897 | |
898 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
899 | PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { |
900 | struct pp_atomctrl_internal_ss_info ssInfo; |
901 | |
902 | uint32_t vco_freq = clock * dividers.uc_pll_post_div; |
903 | if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr, |
904 | vco_freq, &ssInfo)) { |
905 | /* |
906 | * ss_info.speed_spectrum_percentage -- in unit of 0.01% |
907 | * ss_info.speed_spectrum_rate -- in unit of khz |
908 | * |
909 | * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 |
910 | */ |
911 | uint32_t clk_s = ref_clock * 5 / |
912 | (ref_divider * ssInfo.speed_spectrum_rate); |
913 | /* clkv = 2 * D * fbdiv / NS */ |
914 | uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage * |
915 | fbdiv / (clk_s * 10000); |
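/* clk_s implements the formula above with the /2 folded into ref_clock * 5;
 * clk_v scales the spread percentage (in 0.01% units, hence the 10000 divisor)
 * by fbdiv and clk_s. */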
916 | |
917 | cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
918 | CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
919 | cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
920 | CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
921 | cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
922 | CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
923 | } |
924 | } |
925 | |
926 | sclk->SclkFrequency = clock; |
927 | sclk->CgSpllFuncCntl3 = spll_func_cntl_3; |
928 | sclk->CgSpllFuncCntl4 = spll_func_cntl_4; |
929 | sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; |
930 | sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; |
931 | sclk->SclkDid = (uint8_t)dividers.pll_post_divider; |
932 | |
933 | return 0; |
934 | } |
935 | |
936 | static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, |
937 | uint32_t clock, struct SMU73_Discrete_GraphicsLevel *level) |
938 | { |
939 | int result; |
940 | /* PP_Clocks minClocks; */ |
941 | uint32_t mvdd; |
942 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
943 | struct phm_ppt_v1_information *table_info = |
944 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
945 | phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
946 | |
947 | result = fiji_calculate_sclk_params(hwmgr, clock, level); |
948 | |
949 | if (hwmgr->od_enabled) |
950 | vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_sclk; |
951 | else |
952 | vdd_dep_table = table_info->vdd_dep_on_sclk; |
953 | |
954 | /* populate graphics levels */ |
955 | result = fiji_get_dependency_volt_by_clk(hwmgr, |
956 | vdd_dep_table, clock, |
957 | (uint32_t *)(&level->MinVoltage), &mvdd); |
958 | PP_ASSERT_WITH_CODE((0 == result),
959 | "can not find VDDC voltage value for "
960 | "VDDC engine clock dependency table",
961 | return result);
962 | |
963 | level->SclkFrequency = clock; |
964 | level->ActivityLevel = data->current_profile_setting.sclk_activity; |
965 | level->CcPwrDynRm = 0; |
966 | level->CcPwrDynRm1 = 0; |
967 | level->EnabledForActivity = 0; |
968 | level->EnabledForThrottle = 1; |
969 | level->UpHyst = data->current_profile_setting.sclk_up_hyst; |
970 | level->DownHyst = data->current_profile_setting.sclk_down_hyst; |
971 | level->VoltageDownHyst = 0; |
972 | level->PowerThrottle = 0; |
973 | |
974 | data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr; |
975 | |
976 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) |
977 | level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock, |
978 | hwmgr->display_config->min_core_set_clock_in_sr); |
979 | |
980 | |
981 | /* Default to slow, highest DPM level will be |
982 | * set to PPSMC_DISPLAY_WATERMARK_LOW later. |
983 | */ |
984 | level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
985 | |
986 | CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
987 | CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
988 | CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
989 | CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
990 | CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
991 | CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
992 | CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
993 | CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
994 | CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
995 | |
996 | return 0; |
997 | } |
998 | |
999 | static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) |
1000 | { |
1001 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1002 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
1003 | |
1004 | struct smu7_dpm_table *dpm_table = &data->dpm_table; |
1005 | struct phm_ppt_v1_information *table_info = |
1006 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1007 | struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; |
1008 | uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count; |
1009 | int result = 0; |
1010 | uint32_t array = smu_data->smu7_data.dpm_table_start + |
1011 | offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
1012 | uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) * |
1013 | SMU73_MAX_LEVELS_GRAPHICS;
1014 | struct SMU73_Discrete_GraphicsLevel *levels = |
1015 | smu_data->smc_state_table.GraphicsLevel; |
1016 | uint32_t i, max_entry; |
1017 | uint8_t hightest_pcie_level_enabled = 0, |
1018 | lowest_pcie_level_enabled = 0, |
1019 | mid_pcie_level_enabled = 0, |
1020 | count = 0; |
1021 | |
1022 | for (i = 0; i < dpm_table->sclk_table.count; i++) { |
1023 | result = fiji_populate_single_graphic_level(hwmgr, |
1024 | dpm_table->sclk_table.dpm_levels[i].value, |
1025 | &levels[i]); |
1026 | if (result) |
1027 | return result; |
1028 | |
1029 | /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ |
1030 | if (i > 1) |
1031 | levels[i].DeepSleepDivId = 0; |
1032 | } |
1033 | |
1034 | /* Only enable level 0 for now.*/ |
1035 | levels[0].EnabledForActivity = 1; |
1036 | |
1037 | /* set highest level watermark to high */ |
1038 | levels[dpm_table->sclk_table.count - 1].DisplayWatermark = |
1039 | PPSMC_DISPLAY_WATERMARK_HIGH;
1040 | |
1041 | smu_data->smc_state_table.GraphicsDpmLevelCount = |
1042 | (uint8_t)dpm_table->sclk_table.count; |
1043 | data->dpm_level_enable_mask.sclk_dpm_enable_mask = |
1044 | phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); |
1045 | |
1046 | if (pcie_table != NULL) {
1047 | PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
1048 | "There must be 1 or more PCIE levels defined in PPTable.",
1049 | return -EINVAL);
1050 | max_entry = pcie_entry_cnt - 1; |
1051 | for (i = 0; i < dpm_table->sclk_table.count; i++) |
1052 | levels[i].pcieDpmLevel = |
1053 | (uint8_t) ((i < max_entry) ? i : max_entry); |
1054 | } else { |
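/* No PCIE table in the PPTable: derive the lowest/mid/highest enabled PCIE DPM
 * levels from pcie_dpm_enable_mask and map SCLK levels 0, 1 and 2+ onto them below. */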
1055 | while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && |
1056 | ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & |
1057 | (1 << (hightest_pcie_level_enabled + 1))) != 0)) |
1058 | hightest_pcie_level_enabled++; |
1059 | |
1060 | while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && |
1061 | ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & |
1062 | (1 << lowest_pcie_level_enabled)) == 0)) |
1063 | lowest_pcie_level_enabled++; |
1064 | |
1065 | while ((count < hightest_pcie_level_enabled) && |
1066 | ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & |
1067 | (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) |
1068 | count++; |
1069 | |
1070 | mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < |
1071 | hightest_pcie_level_enabled ? |
1072 | (lowest_pcie_level_enabled + 1 + count) : |
1073 | hightest_pcie_level_enabled; |
1074 | |
1075 | /* set pcieDpmLevel to hightest_pcie_level_enabled */ |
1076 | for (i = 2; i < dpm_table->sclk_table.count; i++) |
1077 | levels[i].pcieDpmLevel = hightest_pcie_level_enabled; |
1078 | |
1079 | /* set pcieDpmLevel to lowest_pcie_level_enabled */ |
1080 | levels[0].pcieDpmLevel = lowest_pcie_level_enabled; |
1081 | |
1082 | /* set pcieDpmLevel to mid_pcie_level_enabled */ |
1083 | levels[1].pcieDpmLevel = mid_pcie_level_enabled; |
1084 | } |
1085 | /* level count will send to smc once at init smc table and never change */ |
1086 | result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, |
1087 | (uint32_t)array_size, SMC_RAM_END);
1088 | |
1089 | return result; |
1090 | } |
1091 | |
1092 | |
1093 | /* |
1094 | * MCLK Frequency Ratio |
1095 | * SEQ_CG_RESP Bit[31:24] - 0x0 |
1096 | * Bit[27:24] - DDR3 Frequency ratio
1097 | * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz |
1098 | * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz |
1099 | * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz |
1100 | * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz |
1101 | * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz |
1102 | * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz |
1103 | * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz |
1104 | * 400 < 0x7 <= 450MHz, 800 < 0xF |
1105 | */ |
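/* mem_clock below is in 10 kHz units, so e.g. 60000 (600 MHz) maps to ratio 0xA. */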
1106 | static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock) |
1107 | { |
1108 | if (mem_clock <= 10000) |
1109 | return 0x0; |
1110 | if (mem_clock <= 15000) |
1111 | return 0x1; |
1112 | if (mem_clock <= 20000) |
1113 | return 0x2; |
1114 | if (mem_clock <= 25000) |
1115 | return 0x3; |
1116 | if (mem_clock <= 30000) |
1117 | return 0x4; |
1118 | if (mem_clock <= 35000) |
1119 | return 0x5; |
1120 | if (mem_clock <= 40000) |
1121 | return 0x6; |
1122 | if (mem_clock <= 45000) |
1123 | return 0x7; |
1124 | if (mem_clock <= 50000) |
1125 | return 0x8; |
1126 | if (mem_clock <= 55000) |
1127 | return 0x9; |
1128 | if (mem_clock <= 60000) |
1129 | return 0xa; |
1130 | if (mem_clock <= 65000) |
1131 | return 0xb; |
1132 | if (mem_clock <= 70000) |
1133 | return 0xc; |
1134 | if (mem_clock <= 75000) |
1135 | return 0xd; |
1136 | if (mem_clock <= 80000) |
1137 | return 0xe; |
1138 | /* mem_clock > 800MHz */ |
1139 | return 0xf; |
1140 | } |
1141 | |
1142 | static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr, |
1143 | uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk) |
1144 | { |
1145 | struct pp_atomctrl_memory_clock_param mem_param; |
1146 | int result; |
1147 | |
1148 | result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param); |
1149 | PP_ASSERT_WITH_CODE((0 == result),
1150 | "Failed to get Memory PLL Dividers.",
1151 | );
1152 | |
1153 | /* Save the result data to the output memory level structure */
1154 | mclk->MclkFrequency = clock; |
1155 | mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider; |
1156 | mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock); |
1157 | |
1158 | return result; |
1159 | } |
1160 | |
1161 | static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, |
1162 | uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level) |
1163 | { |
1164 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1165 | struct phm_ppt_v1_information *table_info = |
1166 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1167 | int result = 0; |
1168 | uint32_t mclk_stutter_mode_threshold = 60000; |
1169 | phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
1170 | |
1171 | if (hwmgr->od_enabled) |
1172 | vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk; |
1173 | else |
1174 | vdd_dep_table = table_info->vdd_dep_on_mclk; |
1175 | |
1176 | if (vdd_dep_table) { |
1177 | result = fiji_get_dependency_volt_by_clk(hwmgr, |
1178 | vdd_dep_table, clock, |
1179 | (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd); |
1180 | PP_ASSERT_WITH_CODE((0 == result),
1181 | "can not find MinVddc voltage value from memory "
1182 | "VDDC voltage dependency table", return result);
1183 | } |
1184 | |
1185 | mem_level->EnabledForThrottle = 1; |
1186 | mem_level->EnabledForActivity = 0; |
1187 | mem_level->UpHyst = data->current_profile_setting.mclk_up_hyst; |
1188 | mem_level->DownHyst = data->current_profile_setting.mclk_down_hyst; |
1189 | mem_level->VoltageDownHyst = 0; |
1190 | mem_level->ActivityLevel = data->current_profile_setting.mclk_activity; |
1191 | mem_level->StutterEnable = false;
1192 | |
1193 | mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1194 | |
1195 | /* Enable stutter mode only if all of the following conditions apply.
1196 | * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
1197 | * &(data->DisplayTiming.numExistingDisplays));
1198 | */
1199 | data->display_timing.num_existing_displays = hwmgr->display_config->num_display; |
1200 | data->display_timing.vrefresh = hwmgr->display_config->vrefresh; |
1201 | |
1202 | if (mclk_stutter_mode_threshold && |
1203 | (clock <= mclk_stutter_mode_threshold) && |
1204 | (!data->is_uvd_enabled) && |
1205 | (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
1206 | STUTTER_ENABLE) & 0x1))
1207 | mem_level->StutterEnable = true;
1208 | |
1209 | result = fiji_calculate_mclk_params(hwmgr, clock, mem_level); |
1210 | if (!result) { |
1211 | CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
1212 | CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
1213 | CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
1214 | CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
1215 | } |
1216 | return result; |
1217 | } |
1218 | |
1219 | static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) |
1220 | { |
1221 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1222 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
1223 | struct smu7_dpm_table *dpm_table = &data->dpm_table; |
1224 | int result; |
1225 | /* populate MCLK dpm table to SMU7 */ |
1226 | uint32_t array = smu_data->smu7_data.dpm_table_start + |
1227 | offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
1228 | uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) * |
1229 | SMU73_MAX_LEVELS_MEMORY;
1230 | struct SMU73_Discrete_MemoryLevel *levels = |
1231 | smu_data->smc_state_table.MemoryLevel; |
1232 | uint32_t i; |
1233 | |
1234 | for (i = 0; i < dpm_table->mclk_table.count; i++) { |
1235 | PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
1236 | "can not populate memory level as memory clock is zero",
1237 | return -EINVAL);
1238 | result = fiji_populate_single_memory_level(hwmgr, |
1239 | dpm_table->mclk_table.dpm_levels[i].value, |
1240 | &levels[i]); |
1241 | if (result) |
1242 | return result; |
1243 | } |
1244 | |
1245 | /* Only enable level 0 for now. */ |
1246 | levels[0].EnabledForActivity = 1; |
1247 | |
1248 | /* In order to prevent MC activity in stutter mode from pushing DPM up,
1249 | * the UVD change complements this by putting the MCLK in
1250 | * a higher state by default, so that we are not affected by
1251 | * the up threshold or MCLK DPM latency.
1252 | */
1253 | levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; |
1254 | CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
1255 | |
1256 | smu_data->smc_state_table.MemoryDpmLevelCount = |
1257 | (uint8_t)dpm_table->mclk_table.count; |
1258 | data->dpm_level_enable_mask.mclk_dpm_enable_mask = |
1259 | phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); |
1260 | /* set highest level watermark to high */ |
1261 | levels[dpm_table->mclk_table.count - 1].DisplayWatermark = |
1262 | PPSMC_DISPLAY_WATERMARK_HIGH;
1263 | |
1264 | /* level count will send to smc once at init smc table and never change */ |
1265 | result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, |
1266 | (uint32_t)array_size, SMC_RAM_END);
1267 | |
1268 | return result; |
1269 | } |
1270 | |
1271 | static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr, |
1272 | uint32_t mclk, SMIO_Pattern *smio_pat) |
1273 | { |
1274 | const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1275 | struct phm_ppt_v1_information *table_info = |
1276 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1277 | uint32_t i = 0; |
1278 | |
1279 | if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
1280 | /* find the first MVDD entry whose clock is at or above the requested clock */
1281 | for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { |
1282 | if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { |
1283 | smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; |
1284 | break; |
1285 | } |
1286 | } |
1287 | PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
1288 | "MVDD Voltage is outside the supported range.",
1289 | return -EINVAL);
1290 | } else |
1291 | return -EINVAL;
1292 | |
1293 | return 0; |
1294 | } |
1295 | |
1296 | static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, |
1297 | SMU73_Discrete_DpmTable *table) |
1298 | { |
1299 | int result = 0; |
1300 | const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1301 | struct phm_ppt_v1_information *table_info = |
1302 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1303 | struct pp_atomctrl_clock_dividers_vi dividers; |
1304 | SMIO_Pattern vol_level; |
1305 | uint32_t mvdd; |
1306 | uint16_t us_mvdd; |
1307 | uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; |
1308 | uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; |
1309 | |
1310 | table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1311 | |
1312 | if (!data->sclk_dpm_key_disabled) { |
1313 | /* Get MinVoltage and Frequency from DPM0, |
1314 | * already converted to SMC_UL */ |
1315 | table->ACPILevel.SclkFrequency = |
1316 | data->dpm_table.sclk_table.dpm_levels[0].value; |
1317 | result = fiji_get_dependency_volt_by_clk(hwmgr, |
1318 | table_info->vdd_dep_on_sclk, |
1319 | table->ACPILevel.SclkFrequency, |
1320 | (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd); |
1321 | PP_ASSERT_WITH_CODE((0 == result),
1322 | "Cannot find ACPI VDDC voltage value " \
1323 | "in Clock Dependency Table",
1324 | );
1325 | } else { |
1326 | table->ACPILevel.SclkFrequency = |
1327 | data->vbios_boot_state.sclk_bootup_value; |
1328 | table->ACPILevel.MinVoltage = |
1329 | data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
1330 | } |
1331 | |
1332 | /* get the engine clock dividers for this clock value */ |
1333 | result = atomctrl_get_engine_pll_dividers_vi(hwmgr, |
1334 | table->ACPILevel.SclkFrequency, &dividers);
1335 | PP_ASSERT_WITH_CODE(result == 0,
1336 | "Error retrieving Engine Clock dividers from VBIOS.",
1337 | return result);
1338 | |
1339 | table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; |
1340 | table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW0; |
1341 | table->ACPILevel.DeepSleepDivId = 0; |
1342 | |
1343 | spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1344 | SPLL_PWRON, 0);
1345 | spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
1346 | SPLL_RESET, 1);
1347 | spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
1348 | SCLK_MUX_SEL, 4);
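/* For the ACPI level the SPLL is powered down (SPLL_PWRON = 0), held in reset
 * (SPLL_RESET = 1) and the SCLK mux is switched to another input
 * (SCLK_MUX_SEL = 4), presumably so SCLK no longer runs from the SPLL. */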
1349 | |
1350 | table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; |
1351 | table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; |
1352 | table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; |
1353 | table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; |
1354 | table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; |
1355 | table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; |
1356 | table->ACPILevel.CcPwrDynRm = 0; |
1357 | table->ACPILevel.CcPwrDynRm1 = 0; |
1358 | |
1359 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1360 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1361 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
1362 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1363 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1364 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1365 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1366 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1367 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1368 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1369 | CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1370 | |
1371 | if (!data->mclk_dpm_key_disabled) { |
1372 | /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ |
1373 | table->MemoryACPILevel.MclkFrequency = |
1374 | data->dpm_table.mclk_table.dpm_levels[0].value; |
1375 | result = fiji_get_dependency_volt_by_clk(hwmgr, |
1376 | table_info->vdd_dep_on_mclk, |
1377 | table->MemoryACPILevel.MclkFrequency, |
1378 | (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd); |
1379 | PP_ASSERT_WITH_CODE((0 == result),
1380 | "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
1381 | );
1382 | } else { |
1383 | table->MemoryACPILevel.MclkFrequency = |
1384 | data->vbios_boot_state.mclk_bootup_value; |
1385 | table->MemoryACPILevel.MinVoltage = |
1386 | data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
1387 | } |
1388 | |
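/* Use the VBIOS boot-up MVDD when MVDD control or MCLK DPM is disabled;
 * otherwise look up the MVDD matching the lowest MCLK DPM level. */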
1389 | us_mvdd = 0; |
1390 | if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
1391 | (data->mclk_dpm_key_disabled)) |
1392 | us_mvdd = data->vbios_boot_state.mvdd_bootup_value; |
1393 | else { |
1394 | if (!fiji_populate_mvdd_value(hwmgr, |
1395 | data->dpm_table.mclk_table.dpm_levels[0].value, |
1396 | &vol_level)) |
1397 | us_mvdd = vol_level.Voltage; |
1398 | } |
1399 | |
1400 | table->MemoryACPILevel.MinMvdd = |
1401 | PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
1402 | |
1403 | table->MemoryACPILevel.EnabledForThrottle = 0; |
1404 | table->MemoryACPILevel.EnabledForActivity = 0; |
1405 | table->MemoryACPILevel.UpHyst = 0; |
1406 | table->MemoryACPILevel.DownHyst = 100; |
1407 | table->MemoryACPILevel.VoltageDownHyst = 0; |
1408 | table->MemoryACPILevel.ActivityLevel = |
1409 | PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1410 | |
1411 | table->MemoryACPILevel.StutterEnable = false;
1412 | CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
1413 | CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
1414 | |
1415 | return result; |
1416 | } |
1417 | |
1418 | static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr, |
1419 | SMU73_Discrete_DpmTable *table) |
1420 | { |
1421 | int result = -EINVAL;
1422 | uint8_t count; |
1423 | struct pp_atomctrl_clock_dividers_vi dividers; |
1424 | struct phm_ppt_v1_information *table_info = |
1425 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1426 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = |
1427 | table_info->mm_dep_table; |
1428 | |
1429 | table->VceLevelCount = (uint8_t)(mm_table->count); |
1430 | table->VceBootLevel = 0; |
1431 | |
1432 | for (count = 0; count < table->VceLevelCount; count++) { |
1433 | table->VceLevel[count].Frequency = mm_table->entries[count].eclk; |
1434 | table->VceLevel[count].MinVoltage = 0; |
1435 | table->VceLevel[count].MinVoltage |= |
1436 | (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1437 | table->VceLevel[count].MinVoltage |=
1438 | ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
1439 | VOLTAGE_SCALE) << VDDCI_SHIFT;
1440 | table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
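/* MinVoltage is a packed dword: scaled VDDC at VDDC_SHIFT, the derived VDDCI
 * (VDDC - VDDC_VDDCI_DELTA, scaled) at VDDCI_SHIFT, phase count at PHASES_SHIFT. */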
1441 | |
1442 | /* retrieve divider value for VBIOS */
1443 | result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1444 | table->VceLevel[count].Frequency, &dividers);
1445 | PP_ASSERT_WITH_CODE((0 == result),
1446 | "can not find divide id for VCE engine clock",
1447 | return result);
1448 | |
1449 | table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; |
1450 | |
1451 | CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1452 | CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
1453 | } |
1454 | return result; |
1455 | } |
1456 | |
1457 | static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr, |
1458 | SMU73_Discrete_DpmTable *table) |
1459 | { |
1460 | int result = -EINVAL;
1461 | uint8_t count; |
1462 | struct pp_atomctrl_clock_dividers_vi dividers; |
1463 | struct phm_ppt_v1_information *table_info = |
1464 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1465 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = |
1466 | table_info->mm_dep_table; |
1467 | |
1468 | table->AcpLevelCount = (uint8_t)(mm_table->count); |
1469 | table->AcpBootLevel = 0; |
1470 | |
1471 | for (count = 0; count < table->AcpLevelCount; count++) { |
1472 | table->AcpLevel[count].Frequency = mm_table->entries[count].aclk; |
1473 | table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1474 | VOLTAGE_SCALE) << VDDC_SHIFT;
1475 | table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
1476 | VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
1477 | table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1478 | |
1479 | /* retrieve divider value for VBIOS */ |
1480 | result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, |
1481 | table->AcpLevel[count].Frequency, &dividers);
1482 | PP_ASSERT_WITH_CODE((0 == result),
1483 | "can not find divide id for engine clock", return result);
1484 | |
1485 | table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; |
1486 | |
1487 | CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1488 | CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
1489 | } |
1490 | return result; |
1491 | } |
1492 | |
1493 | static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, |
1494 | int32_t eng_clock, int32_t mem_clock, |
1495 | struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs) |
1496 | { |
1497 | uint32_t dram_timing; |
1498 | uint32_t dram_timing2; |
1499 | uint32_t burstTime; |
1500 | ULONG trrds, trrdl; |
1501 | int result; |
1502 | |
1503 | result = atomctrl_set_engine_dram_timings_rv770(hwmgr, |
1504 | eng_clock, mem_clock); |
1505 | PP_ASSERT_WITH_CODE(result == 0,
1506 | "Error calling VBIOS to set DRAM_TIMING.", return result);
1507 | |
1508 | dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1509 | dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1510 | burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
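/* The VBIOS call above has programmed the MC arbiter DRAM timings for this
 * sclk/mclk pair; the resulting registers are read back here and cached in the
 * SMC arb timing table entry. */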
1511 | |
1512 | trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
1513 | trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
1514 | |
1515 | arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing)(__uint32_t)(__builtin_constant_p(dram_timing) ? (__uint32_t) (((__uint32_t)(dram_timing) & 0xff) << 24 | ((__uint32_t )(dram_timing) & 0xff00) << 8 | ((__uint32_t)(dram_timing ) & 0xff0000) >> 8 | ((__uint32_t)(dram_timing) & 0xff000000) >> 24) : __swap32md(dram_timing)); |
1516 | arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2)(__uint32_t)(__builtin_constant_p(dram_timing2) ? (__uint32_t )(((__uint32_t)(dram_timing2) & 0xff) << 24 | ((__uint32_t )(dram_timing2) & 0xff00) << 8 | ((__uint32_t)(dram_timing2 ) & 0xff0000) >> 8 | ((__uint32_t)(dram_timing2) & 0xff000000) >> 24) : __swap32md(dram_timing2)); |
1517 | arb_regs->McArbBurstTime = (uint8_t)burstTime; |
1518 | arb_regs->TRRDS = (uint8_t)trrds; |
1519 | arb_regs->TRRDL = (uint8_t)trrdl; |
1520 | |
1521 | return 0; |
1522 | } |
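The helper above derives the ARB entry from three MC registers: the two DRAM timing words are byte-swapped for the SMC, and the TRRDS/TRRDL sub-fields are pulled out of MC_ARB_BURST_TIME with the mask-then-shift that PHM_GET_FIELD expands to. A standalone sketch of that extraction, using the mask and shift values visible in the inline expansion; the helper name get_field and the example register value are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Generic mask-then-shift field read, as in the PHM_GET_FIELD expansion. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t burst_time = 0x01534c00;                        /* example value */
	uint32_t trrds = get_field(burst_time, 0x7c00, 0xa);     /* bits 14:10    */
	uint32_t trrdl = get_field(burst_time, 0x1f00000, 0x14); /* bits 24:20    */

	printf("TRRDS=%u TRRDL=%u\n", trrds, trrdl);
	return 0;
}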
1523 | |
1524 | static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) |
1525 | { |
1526 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1527 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
1528 | struct SMU73_Discrete_MCArbDramTimingTable arb_regs; |
1529 | uint32_t i, j; |
1530 | int result = 0; |
1531 | |
1532 | for (i = 0; i < data->dpm_table.sclk_table.count; i++) { |
1533 | for (j = 0; j < data->dpm_table.mclk_table.count; j++) { |
1534 | result = fiji_populate_memory_timing_parameters(hwmgr, |
1535 | data->dpm_table.sclk_table.dpm_levels[i].value, |
1536 | data->dpm_table.mclk_table.dpm_levels[j].value, |
1537 | &arb_regs.entries[i][j]); |
1538 | if (result) |
1539 | break; |
1540 | } |
1541 | } |
1542 | |
1543 | if (!result) |
1544 | result = smu7_copy_bytes_to_smc( |
1545 | hwmgr, |
1546 | smu_data->smu7_data.arb_table_start, |
1547 | (uint8_t *)&arb_regs, |
1548 | sizeof(SMU73_Discrete_MCArbDramTimingTable), |
1549 | SMC_RAM_END0x40000); |
1550 | return result; |
1551 | } |
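fiji_program_memory_timing_parameters() fills one ARB entry for every (sclk, mclk) pair in the DPM tables and, if every entry populates cleanly, copies the whole table to SMC RAM in one call. A minimal sketch of the fill pattern, with a stand-in entry type and populate callback rather than the driver's structures; note that, as in the listing, a failure only breaks out of the inner loop:

#include <stdint.h>

struct arb_entry { uint32_t dram_timing, dram_timing2; uint8_t burst_time; };

/* Placeholder derivation; the driver asks the VBIOS and reads MC registers. */
static int populate_entry(uint32_t sclk, uint32_t mclk, struct arb_entry *e)
{
	e->dram_timing  = sclk / 100;
	e->dram_timing2 = mclk / 100;
	e->burst_time   = 4;
	return 0;
}

static int fill_arb_table(const uint32_t *sclks, int nsclk,
			  const uint32_t *mclks, int nmclk,
			  struct arb_entry table[][8])
{
	int result = 0;

	for (int i = 0; i < nsclk; i++)
		for (int j = 0; j < nmclk; j++) {
			result = populate_entry(sclks[i], mclks[j], &table[i][j]);
			if (result)
				break;	/* leaves this row only, as above */
		}
	return result;
}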
1552 | |
1553 | static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, |
1554 | struct SMU73_Discrete_DpmTable *table) |
1555 | { |
1556 | int result = -EINVAL22; |
1557 | uint8_t count; |
1558 | struct pp_atomctrl_clock_dividers_vi dividers; |
1559 | struct phm_ppt_v1_information *table_info = |
1560 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1561 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = |
1562 | table_info->mm_dep_table; |
1563 | |
1564 | table->UvdLevelCount = (uint8_t)(mm_table->count); |
1565 | table->UvdBootLevel = 0; |
1566 | |
1567 | for (count = 0; count < table->UvdLevelCount; count++) { |
1568 | table->UvdLevel[count].MinVoltage = 0; |
1569 | table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; |
1570 | table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; |
1571 | table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * |
1572 | VOLTAGE_SCALE4) << VDDC_SHIFT0; |
1573 | table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - |
1574 | VDDC_VDDCI_DELTA300) * VOLTAGE_SCALE4) << VDDCI_SHIFT15; |
1575 | table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT30; |
1576 | |
1577 | /* retrieve divider value from VBIOS */
1578 | result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, |
1579 | table->UvdLevel[count].VclkFrequency, ÷rs); |
1580 | PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for Vclk clock"); return result; } } while (0) |
1581 | "can not find divide id for Vclk clock", return result)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for Vclk clock"); return result; } } while (0); |
1582 | |
1583 | table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; |
1584 | |
1585 | result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, |
1586 | table->UvdLevel[count].DclkFrequency, ÷rs); |
1587 | PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for Dclk clock"); return result; } } while (0) |
1588 | "can not find divide id for Dclk clock", return result)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for Dclk clock"); return result; } } while (0); |
1589 | |
1590 | table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; |
1591 | |
1592 | CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency)((table->UvdLevel[count].VclkFrequency) = (__uint32_t)(__builtin_constant_p (table->UvdLevel[count].VclkFrequency) ? (__uint32_t)(((__uint32_t )(table->UvdLevel[count].VclkFrequency) & 0xff) << 24 | ((__uint32_t)(table->UvdLevel[count].VclkFrequency) & 0xff00) << 8 | ((__uint32_t)(table->UvdLevel[count] .VclkFrequency) & 0xff0000) >> 8 | ((__uint32_t)(table ->UvdLevel[count].VclkFrequency) & 0xff000000) >> 24) : __swap32md(table->UvdLevel[count].VclkFrequency))); |
1593 | CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency)((table->UvdLevel[count].DclkFrequency) = (__uint32_t)(__builtin_constant_p (table->UvdLevel[count].DclkFrequency) ? (__uint32_t)(((__uint32_t )(table->UvdLevel[count].DclkFrequency) & 0xff) << 24 | ((__uint32_t)(table->UvdLevel[count].DclkFrequency) & 0xff00) << 8 | ((__uint32_t)(table->UvdLevel[count] .DclkFrequency) & 0xff0000) >> 8 | ((__uint32_t)(table ->UvdLevel[count].DclkFrequency) & 0xff000000) >> 24) : __swap32md(table->UvdLevel[count].DclkFrequency))); |
1594 | CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage)((table->UvdLevel[count].MinVoltage) = (__uint32_t)(__builtin_constant_p (table->UvdLevel[count].MinVoltage) ? (__uint32_t)(((__uint32_t )(table->UvdLevel[count].MinVoltage) & 0xff) << 24 | ((__uint32_t)(table->UvdLevel[count].MinVoltage) & 0xff00 ) << 8 | ((__uint32_t)(table->UvdLevel[count].MinVoltage ) & 0xff0000) >> 8 | ((__uint32_t)(table->UvdLevel [count].MinVoltage) & 0xff000000) >> 24) : __swap32md (table->UvdLevel[count].MinVoltage))); |
1595 | |
1596 | } |
1597 | return result; |
1598 | } |
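Both the ACP and UVD loops above pack MinVoltage the same way: VDDC scaled by VOLTAGE_SCALE in the low field, VDDCI (VDDC minus the fixed 300 mV delta) at bit 15, and a single phase bit at bit 30. A sketch of that packing with the constants taken from the inline expansions; the helper name is made up, and it assumes vddc_mv is above 300 as the dependency-table entries are:

#include <stdint.h>

static uint32_t pack_min_voltage(uint16_t vddc_mv)
{
	uint32_t v = 0;

	v |= (uint32_t)(vddc_mv * 4) << 0;           /* VDDC,  VOLTAGE_SCALE = 4 */
	v |= (uint32_t)((vddc_mv - 300) * 4) << 15;  /* VDDCI, delta = 300       */
	v |= 1u << 30;                               /* PHASES: single phase     */
	return v;
}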
1599 | |
1600 | static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr, |
1601 | struct SMU73_Discrete_DpmTable *table) |
1602 | { |
1603 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1604 | |
1605 | table->GraphicsBootLevel = 0; |
1606 | table->MemoryBootLevel = 0; |
1607 | |
1608 | /* find boot level from dpm table */ |
1609 | phm_find_boot_level(&(data->dpm_table.sclk_table), |
1610 | data->vbios_boot_state.sclk_bootup_value, |
1611 | (uint32_t *)&(table->GraphicsBootLevel)); |
1612 | |
1613 | phm_find_boot_level(&(data->dpm_table.mclk_table), |
1614 | data->vbios_boot_state.mclk_bootup_value, |
1615 | (uint32_t *)&(table->MemoryBootLevel)); |
1616 | |
1617 | table->BootVddc = data->vbios_boot_state.vddc_bootup_value * |
1618 | VOLTAGE_SCALE4; |
1619 | table->BootVddci = data->vbios_boot_state.vddci_bootup_value * |
1620 | VOLTAGE_SCALE4; |
1621 | table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * |
1622 | VOLTAGE_SCALE4; |
1623 | |
1624 | CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc)((table->BootVddc) = (__uint16_t)(__builtin_constant_p(table ->BootVddc) ? (__uint16_t)(((__uint16_t)(table->BootVddc ) & 0xffU) << 8 | ((__uint16_t)(table->BootVddc) & 0xff00U) >> 8) : __swap16md(table->BootVddc)) ); |
1625 | CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci)((table->BootVddci) = (__uint16_t)(__builtin_constant_p(table ->BootVddci) ? (__uint16_t)(((__uint16_t)(table->BootVddci ) & 0xffU) << 8 | ((__uint16_t)(table->BootVddci ) & 0xff00U) >> 8) : __swap16md(table->BootVddci ))); |
1626 | CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd)((table->BootMVdd) = (__uint16_t)(__builtin_constant_p(table ->BootMVdd) ? (__uint16_t)(((__uint16_t)(table->BootMVdd ) & 0xffU) << 8 | ((__uint16_t)(table->BootMVdd) & 0xff00U) >> 8) : __swap16md(table->BootMVdd)) ); |
1627 | |
1628 | return 0; |
1629 | } |
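The boot voltages are stored in units of VOLTAGE_SCALE (value * 4, i.e. 0.25 mV steps) and then converted to the SMC's big-endian layout; on this little-endian build CONVERT_FROM_HOST_TO_SMC_US is simply a 16-bit byte swap. A sketch of the conversion, with an invented helper name and valid only for a little-endian host:

#include <stdint.h>

/* Host-to-SMC 16-bit conversion on a little-endian host: swap the two bytes. */
static uint16_t host_to_smc_u16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* Example: a 1.15 V boot VDDC becomes 1150 * 4 = 4600 before the swap. */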
1630 | |
1631 | static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) |
1632 | { |
1633 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1634 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
1635 | struct phm_ppt_v1_information *table_info = |
1636 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1637 | uint8_t count, level; |
1638 | |
1639 | count = (uint8_t)(table_info->vdd_dep_on_sclk->count); |
1640 | for (level = 0; level < count; level++) { |
1641 | if (table_info->vdd_dep_on_sclk->entries[level].clk >= |
1642 | data->vbios_boot_state.sclk_bootup_value) { |
1643 | smu_data->smc_state_table.GraphicsBootLevel = level; |
1644 | break; |
1645 | } |
1646 | } |
1647 | |
1648 | count = (uint8_t)(table_info->vdd_dep_on_mclk->count); |
1649 | for (level = 0; level < count; level++) { |
1650 | if (table_info->vdd_dep_on_mclk->entries[level].clk >= |
1651 | data->vbios_boot_state.mclk_bootup_value) { |
1652 | smu_data->smc_state_table.MemoryBootLevel = level; |
1653 | break; |
1654 | } |
1655 | } |
1656 | |
1657 | return 0; |
1658 | } |
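The boot-level search above simply takes the first dependency-table entry whose clock is at least the VBIOS boot clock. A sketch under that reading, with stand-in names; unlike the loops above, this version falls back to level 0 when nothing qualifies, whereas the driver just leaves the previously stored level untouched:

#include <stdint.h>

static uint8_t find_boot_level(const uint32_t *clks, uint8_t count,
			       uint32_t boot_clk)
{
	for (uint8_t level = 0; level < count; level++)
		if (clks[level] >= boot_clk)
			return level;
	return 0;	/* sketch-only fallback */
}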
1659 | |
1660 | static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) |
1661 | { |
1662 | uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, |
1663 | volt_with_cks, value; |
1664 | uint16_t clock_freq_u16; |
1665 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
1666 | uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, |
1667 | volt_offset = 0; |
1668 | struct phm_ppt_v1_information *table_info = |
1669 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1670 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = |
1671 | table_info->vdd_dep_on_sclk; |
1672 | |
1673 | stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; |
1674 | |
1675 | /* Read SMU_EFUSE to calculate RO and determine whether
1676 | * the part is SS or FF. If RO >= 1660MHz, the part is FF.
1677 | */ |
1678 | efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (146 * 4))) |
1679 | ixSMU_EFUSE_0 + (146 * 4))(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (146 * 4))); |
1680 | efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (148 * 4))) |
1681 | ixSMU_EFUSE_0 + (148 * 4))(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (148 * 4))); |
1682 | efuse &= 0xFF000000; |
1683 | efuse = efuse >> 24; |
1684 | efuse2 &= 0xF; |
1685 | |
1686 | if (efuse2 == 1) |
1687 | ro = (2300 - 1350) * efuse / 255 + 1350; |
1688 | else |
1689 | ro = (2500 - 1000) * efuse / 255 + 1000; |
1690 | |
1691 | if (ro >= 1660) |
1692 | type = 0; |
1693 | else |
1694 | type = 1; |
1695 | |
1696 | /* Populate Stretch amount */ |
1697 | smu_data->smc_state_table.ClockStretcherAmount = stretch_amount; |
1698 | |
1699 | /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ |
1700 | for (i = 0; i < sclk_table->count; i++) { |
1701 | smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= |
1702 | sclk_table->entries[i].cks_enable << i; |
1703 | volt_without_cks = (uint32_t)((14041 * |
1704 | (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / |
1705 | (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); |
1706 | volt_with_cks = (uint32_t)((13946 * |
1707 | (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / |
1708 | (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); |
1709 | if (volt_without_cks >= volt_with_cks) |
1710 | volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + |
1711 | sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); |
1712 | smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; |
1713 | } |
1714 | |
1715 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x1) | (0x1 & ((0x0) << 0x0))))) |
1716 | STRETCH_ENABLE, 0x0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x1) | (0x1 & ((0x0) << 0x0))))); |
1717 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 & ((0x1) << 0x1))))) |
1718 | masterReset, 0x1)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 & ((0x1) << 0x1))))); |
1719 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x4) | (0x4 & ((0x1) << 0x2))))) |
1720 | staticEnable, 0x1)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x4) | (0x4 & ((0x1) << 0x2))))); |
1721 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 & ((0x0) << 0x1))))) |
1722 | masterReset, 0x0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 & ((0x0) << 0x1))))); |
1723 | |
1724 | /* Populate CKS Lookup Table */ |
1725 | if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) |
1726 | stretch_amount2 = 0; |
1727 | else if (stretch_amount == 3 || stretch_amount == 4) |
1728 | stretch_amount2 = 1; |
1729 | else { |
1730 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
1731 | PHM_PlatformCaps_ClockStretcher); |
1732 | PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Stretch Amount in PPTable not supported" ); return -22; } } while (0) |
1733 | "Stretch Amount in PPTable not supported",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Stretch Amount in PPTable not supported" ); return -22; } } while (0) |
1734 | return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Stretch Amount in PPTable not supported" ); return -22; } } while (0); |
1735 | } |
1736 | |
1737 | value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350)) |
1738 | ixPWR_CKS_CNTL)(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350)); |
1739 | value &= 0xFFC2FF87; |
1740 | smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = |
1741 | fiji_clock_stretcher_lookup_table[stretch_amount2][0]; |
1742 | smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = |
1743 | fiji_clock_stretcher_lookup_table[stretch_amount2][1]; |
1744 | clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) ? (__uint32_t)(((__uint32_t)(smu_data-> smc_state_table. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff) << 24 | ((__uint32_t) (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff00) << 8 | ((__uint32_t)(smu_data->smc_state_table. GraphicsLevel [smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff000000) >> 24) : __swap32md (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency)) |
1745 | GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) ? (__uint32_t)(((__uint32_t)(smu_data-> smc_state_table. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff) << 24 | ((__uint32_t) (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff00) << 8 | ((__uint32_t)(smu_data->smc_state_table. GraphicsLevel [smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff000000) >> 24) : __swap32md (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency)) |
1746 | SclkFrequency)(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) ? (__uint32_t)(((__uint32_t)(smu_data-> smc_state_table. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff) << 24 | ((__uint32_t) (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff00) << 8 | ((__uint32_t)(smu_data->smc_state_table. GraphicsLevel [smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff000000) >> 24) : __swap32md (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency)) / 100); |
1747 | if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] < |
1748 | clock_freq_u16 && |
1749 | fiji_clock_stretcher_lookup_table[stretch_amount2][1] > |
1750 | clock_freq_u16) { |
1751 | /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ |
1752 | value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; |
1753 | /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ |
1754 | value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; |
1755 | /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ |
1756 | value |= (fiji_clock_stretch_amount_conversion |
1757 | [fiji_clock_stretcher_lookup_table[stretch_amount2][3]] |
1758 | [stretch_amount]) << 3; |
1759 | } |
1760 | CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].minFreq) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].minFreq ) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable . CKS_LOOKUPTableEntry[0].minFreq) & 0xffU) << 8 | ( (__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].minFreq) & 0xff00U) >> 8) : __swap16md(smu_data ->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0] .minFreq))) |
1761 | CKS_LOOKUPTableEntry[0].minFreq)((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].minFreq) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].minFreq ) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable . CKS_LOOKUPTableEntry[0].minFreq) & 0xffU) << 8 | ( (__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].minFreq) & 0xff00U) >> 8) : __swap16md(smu_data ->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0] .minFreq))); |
1762 | CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].maxFreq) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].maxFreq ) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable . CKS_LOOKUPTableEntry[0].maxFreq) & 0xffU) << 8 | ( (__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].maxFreq) & 0xff00U) >> 8) : __swap16md(smu_data ->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0] .maxFreq))) |
1763 | CKS_LOOKUPTableEntry[0].maxFreq)((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].maxFreq) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].maxFreq ) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable . CKS_LOOKUPTableEntry[0].maxFreq) & 0xffU) << 8 | ( (__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].maxFreq) & 0xff00U) >> 8) : __swap16md(smu_data ->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0] .maxFreq))); |
1764 | smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = |
1765 | fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; |
1766 | smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= |
1767 | (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; |
1768 | |
1769 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350,value)) |
1770 | ixPWR_CKS_CNTL, value)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350,value)); |
1771 | |
1772 | /* Populate DDT Lookup Table */ |
1773 | for (i = 0; i < 4; i++) { |
1774 | /* Assign the minimum and maximum VID stored |
1775 | * in the last row of Clock Stretcher Voltage Table. |
1776 | */ |
1777 | smu_data->smc_state_table.ClockStretcherDataTable. |
1778 | ClockStretcherDataTableEntry[i].minVID = |
1779 | (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2]; |
1780 | smu_data->smc_state_table.ClockStretcherDataTable. |
1781 | ClockStretcherDataTableEntry[i].maxVID = |
1782 | (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3]; |
1783 | /* Loop through each SCLK and check the frequency |
1784 | * to see if it lies within the frequency for clock stretcher. |
1785 | */ |
1786 | for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) { |
1787 | cks_setting = 0; |
1788 | clock_freq = PP_SMC_TO_HOST_UL((__uint32_t)(__builtin_constant_p(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) ? (__uint32_t)(((__uint32_t) (smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency) & 0xff) << 24 | ((__uint32_t)(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) & 0xff00) << 8 | ( (__uint32_t)(smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) & 0xff000000) >> 24 ) : __swap32md(smu_data->smc_state_table.GraphicsLevel[j]. SclkFrequency)) |
1789 | smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency)(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) ? (__uint32_t)(((__uint32_t) (smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency) & 0xff) << 24 | ((__uint32_t)(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) & 0xff00) << 8 | ( (__uint32_t)(smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) & 0xff000000) >> 24 ) : __swap32md(smu_data->smc_state_table.GraphicsLevel[j]. SclkFrequency)); |
1790 | /* Check the allowed frequency against sclk level[j].
1791 | * The sclk's endianness has already been converted,
1792 | * and it is in 10 kHz units,
1793 | * as opposed to the data table, which is in MHz units.
1794 | */ |
1795 | if (clock_freq >= |
1796 | (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) { |
1797 | cks_setting |= 0x2; |
1798 | if (clock_freq < |
1799 | (fiji_clock_stretcher_ddt_table[type][i][1]) * 100) |
1800 | cks_setting |= 0x1; |
1801 | } |
1802 | smu_data->smc_state_table.ClockStretcherDataTable. |
1803 | ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2); |
1804 | } |
1805 | CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.((smu_data->smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xffU) << 8 | ((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xff00U) >> 8) : __swap16md(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ))) |
1806 | ClockStretcherDataTable.((smu_data->smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xffU) << 8 | ((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xff00U) >> 8) : __swap16md(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ))) |
1807 | ClockStretcherDataTableEntry[i].setting)((smu_data->smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xffU) << 8 | ((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xff00U) >> 8) : __swap16md(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ))); |
1808 | } |
1809 | |
1810 | value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL)(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350)); |
1811 | value &= 0xFFFFFFFE; |
1812 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350,value)); |
1813 | |
1814 | return 0; |
1815 | } |
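The clock-stretcher setup starts by turning an 8-bit efuse reading into a ring-oscillator figure and classifying the part as FF (type 0) or SS (type 1), which then selects the row of fiji_clock_stretcher_ddt_table used for the DDT lookup. A sketch of just that classification, using the same constants as the code above; the helper name is invented:

#include <stdint.h>

static uint8_t classify_part(uint8_t efuse, uint8_t efuse2)
{
	uint32_t ro;

	if (efuse2 == 1)
		ro = (2300 - 1350) * efuse / 255 + 1350;
	else
		ro = (2500 - 1000) * efuse / 255 + 1000;

	return (ro >= 1660) ? 0 : 1;	/* 0 = FF part, 1 = SS part */
}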
1816 | |
1817 | static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, |
1818 | struct SMU73_Discrete_DpmTable *table) |
1819 | { |
1820 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1821 | uint16_t config; |
1822 | |
1823 | config = VR_MERGED_WITH_VDDC0; |
1824 | table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT8); |
1825 | |
1826 | /* Set Vddc Voltage Controller */ |
1827 | if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->voltage_control) { |
1828 | config = VR_SVI2_PLANE_11; |
1829 | table->VRConfig |= config; |
1830 | } else { |
1831 | PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "VDDC should be on SVI2 control in merged mode!" ); ; } } while (0) |
1832 | "VDDC should be on SVI2 control in merged mode!",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "VDDC should be on SVI2 control in merged mode!" ); ; } } while (0) |
1833 | )do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "VDDC should be on SVI2 control in merged mode!" ); ; } } while (0); |
1834 | } |
1835 | /* Set Vddci Voltage Controller */ |
1836 | if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->vddci_control) { |
1837 | config = VR_SVI2_PLANE_22; /* only in merged mode */ |
1838 | table->VRConfig |= (config << VRCONF_VDDCI_SHIFT16); |
1839 | } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO0x1 == data->vddci_control) { |
1840 | config = VR_SMIO_PATTERN_13; |
1841 | table->VRConfig |= (config << VRCONF_VDDCI_SHIFT16); |
1842 | } else { |
1843 | config = VR_STATIC_VOLTAGE5; |
1844 | table->VRConfig |= (config << VRCONF_VDDCI_SHIFT16); |
1845 | } |
1846 | /* Set Mvdd Voltage Controller */ |
1847 | if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->mvdd_control) { |
1848 | config = VR_SVI2_PLANE_22; |
1849 | table->VRConfig |= (config << VRCONF_MVDD_SHIFT24); |
1850 | } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO0x1 == data->mvdd_control) { |
1851 | config = VR_SMIO_PATTERN_24; |
1852 | table->VRConfig |= (config << VRCONF_MVDD_SHIFT24); |
1853 | } else { |
1854 | config = VR_STATIC_VOLTAGE5; |
1855 | table->VRConfig |= (config << VRCONF_MVDD_SHIFT24); |
1856 | } |
1857 | |
1858 | return 0; |
1859 | } |
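VRConfig is a packed word with one regulator selector per rail: VDDC in the low field and VDDGFX, VDDCI and MVDD at shifts 8, 16 and 24, matching the VRCONF_*_SHIFT expansions shown above. A sketch of the packing; the helper and parameter names are invented, and the selector values would be the VR_* constants expanded inline above:

#include <stdint.h>

static uint32_t build_vr_config(uint8_t vddc_sel, uint8_t vddgfx_sel,
				uint8_t vddci_sel, uint8_t mvdd_sel)
{
	uint32_t cfg = 0;

	cfg |= vddc_sel;                   /* VDDC: low field          */
	cfg |= (uint32_t)vddgfx_sel << 8;  /* VRCONF_VDDGFX_SHIFT = 8  */
	cfg |= (uint32_t)vddci_sel << 16;  /* VRCONF_VDDCI_SHIFT = 16  */
	cfg |= (uint32_t)mvdd_sel << 24;   /* VRCONF_MVDD_SHIFT = 24   */
	return cfg;
}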
1860 | |
1861 | static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr) |
1862 | { |
1863 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
1864 | uint32_t tmp; |
1865 | int result; |
1866 | |
1867 | /* This is a read-modify-write on the first byte of the ARB table. |
1868 | * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure |
1869 | * is the field 'current'. |
1870 | * This solution is ugly, but we never write the whole table,
1871 | * only individual fields in it.
1872 | * In reality this field should not be in that structure |
1873 | * but in a soft register. |
1874 | */ |
1875 | result = smu7_read_smc_sram_dword(hwmgr, |
1876 | smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END0x40000); |
1877 | |
1878 | if (result) |
1879 | return result; |
1880 | |
1881 | tmp &= 0x00FFFFFF; |
1882 | tmp |= ((uint32_t)MC_CG_ARB_FREQ_F10x0b) << 24; |
1883 | |
1884 | return smu7_write_smc_sram_dword(hwmgr, |
1885 | smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END0x40000); |
1886 | } |
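As the comment explains, only the 'current' frequency field in the first ARB-table dword is rewritten; the other bytes read back from SMC RAM are preserved. The bit manipulation in isolation, with MC_CG_ARB_FREQ_F1 = 0x0b as expanded above and an invented helper name:

#include <stdint.h>

static uint32_t set_current_arb_freq(uint32_t first_dword)
{
	first_dword &= 0x00FFFFFF;            /* keep the low three bytes */
	first_dword |= (uint32_t)0x0b << 24;  /* MC_CG_ARB_FREQ_F1 on top */
	return first_dword;
}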
1887 | |
1888 | static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr) |
1889 | { |
1890 | pp_atomctrl_voltage_table param_led_dpm; |
1891 | int result = 0; |
1892 | u32 mask = 0; |
1893 | |
1894 | result = atomctrl_get_voltage_table_v3(hwmgr, |
1895 | VOLTAGE_TYPE_LEDDPM8, VOLTAGE_OBJ_GPIO_LUT0, |
1896 | ¶m_led_dpm); |
1897 | if (result == 0) { |
1898 | int i, j; |
1899 | u32 tmp = param_led_dpm.mask_low; |
1900 | |
1901 | for (i = 0, j = 0; i < 32; i++) { |
1902 | if (tmp & 1) { |
1903 | mask |= (i << (8 * j)); |
1904 | if (++j >= 3) |
1905 | break; |
1906 | } |
1907 | tmp >>= 1; |
1908 | } |
1909 | } |
1910 | if (mask) |
1911 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1912 | PPSMC_MSG_LedConfig((uint16_t) 0x274), |
1913 | mask, |
1914 | NULL((void *)0)); |
1915 | return 0; |
1916 | } |
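The LED-DPM configuration packs the bit positions of up to three GPIO pins (taken from the voltage table's mask_low) into one parameter word, one position per byte, and sends it with PPSMC_MSG_LedConfig. A behaviour-equivalent sketch of the mask construction; the function name is invented:

#include <stdint.h>

static uint32_t build_led_mask(uint32_t gpio_mask_low)
{
	uint32_t mask = 0;
	int j = 0;

	for (int i = 0; i < 32 && j < 3; i++) {
		if (gpio_mask_low & (1u << i))
			mask |= (uint32_t)i << (8 * j++);  /* pin index into byte j */
	}
	return mask;
}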
1917 | |
1918 | static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) |
1919 | { |
1920 | int result; |
1921 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1922 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
1923 | struct phm_ppt_v1_information *table_info = |
1924 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1925 | struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table); |
1926 | uint8_t i; |
1927 | struct pp_atomctrl_gpio_pin_assignment gpio_pin; |
1928 | |
1929 | fiji_initialize_power_tune_defaults(hwmgr); |
1930 | |
1931 | if (SMU7_VOLTAGE_CONTROL_NONE0x0 != data->voltage_control) |
1932 | fiji_populate_smc_voltage_tables(hwmgr, table); |
1933 | |
1934 | table->SystemFlags = 0; |
1935 | |
1936 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
1937 | PHM_PlatformCaps_AutomaticDCTransition)) |
1938 | table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC0x01; |
1939 | |
1940 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
1941 | PHM_PlatformCaps_StepVddc)) |
1942 | table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC0x02; |
1943 | |
1944 | if (data->is_memory_gddr5) |
1945 | table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR50x04; |
1946 | |
1947 | if (data->ulv_supported && table_info->us_ulv_voltage_offset) { |
1948 | result = fiji_populate_ulv_state(hwmgr, table); |
1949 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ULV state!" ); return result; } } while (0) |
1950 | "Failed to initialize ULV state!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ULV state!" ); return result; } } while (0); |
1951 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020015c,0x40035)) |
1952 | ixCG_ULV_PARAMETER, 0x40035)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020015c,0x40035)); |
1953 | } |
1954 | |
1955 | result = fiji_populate_smc_link_level(hwmgr, table); |
1956 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Link Level!" ); return result; } } while (0) |
1957 | "Failed to initialize Link Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Link Level!" ); return result; } } while (0); |
1958 | |
1959 | result = fiji_populate_all_graphic_levels(hwmgr); |
1960 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Graphics Level!" ); return result; } } while (0) |
1961 | "Failed to initialize Graphics Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Graphics Level!" ); return result; } } while (0); |
1962 | |
1963 | result = fiji_populate_all_memory_levels(hwmgr); |
1964 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Memory Level!" ); return result; } } while (0) |
1965 | "Failed to initialize Memory Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Memory Level!" ); return result; } } while (0); |
1966 | |
1967 | result = fiji_populate_smc_acpi_level(hwmgr, table); |
1968 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACPI Level!" ); return result; } } while (0) |
1969 | "Failed to initialize ACPI Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACPI Level!" ); return result; } } while (0); |
1970 | |
1971 | result = fiji_populate_smc_vce_level(hwmgr, table); |
1972 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize VCE Level!" ); return result; } } while (0) |
1973 | "Failed to initialize VCE Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize VCE Level!" ); return result; } } while (0); |
1974 | |
1975 | result = fiji_populate_smc_acp_level(hwmgr, table); |
1976 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACP Level!" ); return result; } } while (0) |
1977 | "Failed to initialize ACP Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACP Level!" ); return result; } } while (0); |
1978 | |
1979 | /* Since only the initial state is completely set up at this point |
1980 | * (the other states are just copies of the boot state) we only |
1981 | * need to populate the ARB settings for the initial state. |
1982 | */ |
1983 | result = fiji_program_memory_timing_parameters(hwmgr); |
1984 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to Write ARB settings for the initial state." ); return result; } } while (0) |
1985 | "Failed to Write ARB settings for the initial state.", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to Write ARB settings for the initial state." ); return result; } } while (0); |
1986 | |
1987 | result = fiji_populate_smc_uvd_level(hwmgr, table); |
1988 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize UVD Level!" ); return result; } } while (0) |
1989 | "Failed to initialize UVD Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize UVD Level!" ); return result; } } while (0); |
1990 | |
1991 | result = fiji_populate_smc_boot_level(hwmgr, table); |
1992 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Boot Level!" ); return result; } } while (0) |
1993 | "Failed to initialize Boot Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Boot Level!" ); return result; } } while (0); |
1994 | |
1995 | result = fiji_populate_smc_initailial_state(hwmgr); |
1996 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Boot State!" ); return result; } } while (0) |
1997 | "Failed to initialize Boot State!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Boot State!" ); return result; } } while (0); |
1998 | |
1999 | result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr); |
2000 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate BAPM Parameters!" ); return result; } } while (0) |
2001 | "Failed to populate BAPM Parameters!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate BAPM Parameters!" ); return result; } } while (0); |
2002 | |
2003 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
2004 | PHM_PlatformCaps_ClockStretcher)) { |
2005 | result = fiji_populate_clock_stretcher_data_table(hwmgr); |
2006 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate Clock Stretcher Data Table!" ); return result; } } while (0) |
2007 | "Failed to populate Clock Stretcher Data Table!",do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate Clock Stretcher Data Table!" ); return result; } } while (0) |
2008 | return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate Clock Stretcher Data Table!" ); return result; } } while (0); |
2009 | } |
2010 | |
2011 | table->GraphicsVoltageChangeEnable = 1; |
2012 | table->GraphicsThermThrottleEnable = 1; |
2013 | table->GraphicsInterval = 1; |
2014 | table->VoltageInterval = 1; |
2015 | table->ThermalInterval = 1; |
2016 | table->TemperatureLimitHigh = |
2017 | table_info->cac_dtp_table->usTargetOperatingTemp * |
2018 | SMU7_Q88_FORMAT_CONVERSION_UNIT256; |
2019 | table->TemperatureLimitLow = |
2020 | (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * |
2021 | SMU7_Q88_FORMAT_CONVERSION_UNIT256; |
2022 | table->MemoryVoltageChangeEnable = 1; |
2023 | table->MemoryInterval = 1; |
2024 | table->VoltageResponseTime = 0; |
2025 | table->PhaseResponseTime = 0; |
2026 | table->MemoryThermThrottleEnable = 1; |
2027 | table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ |
2028 | table->PCIeGenInterval = 1; |
2029 | table->VRConfig = 0; |
2030 | |
2031 | result = fiji_populate_vr_config(hwmgr, table); |
2032 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate VRConfig setting!" ); return result; } } while (0) |
2033 | "Failed to populate VRConfig setting!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate VRConfig setting!" ); return result; } } while (0); |
2034 | data->vr_config = table->VRConfig; |
2035 | table->ThermGpio = 17; |
2036 | table->SclkStepSize = 0x4000; |
2037 | |
2038 | if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID61, &gpio_pin)) { |
2039 | table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; |
2040 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
2041 | PHM_PlatformCaps_RegulatorHot); |
2042 | } else { |
2043 | table->VRHotGpio = SMU7_UNUSED_GPIO_PIN0x7F; |
2044 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
2045 | PHM_PlatformCaps_RegulatorHot); |
2046 | } |
2047 | |
2048 | if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID60, |
2049 | &gpio_pin)) { |
2050 | table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; |
2051 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
2052 | PHM_PlatformCaps_AutomaticDCTransition); |
2053 | } else { |
2054 | table->AcDcGpio = SMU7_UNUSED_GPIO_PIN0x7F; |
2055 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
2056 | PHM_PlatformCaps_AutomaticDCTransition); |
2057 | } |
2058 | |
2059 | /* Thermal Output GPIO */ |
2060 | if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID65, |
2061 | &gpio_pin)) { |
2062 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
2063 | PHM_PlatformCaps_ThermalOutGPIO); |
2064 | |
2065 | table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; |
2066 | |
2067 | /* For polarity, read GPIOPAD_A with the assigned GPIO pin:
2068 | * since VBIOS will program this register to set the 'inactive state',
2069 | * the driver can then determine the 'active state' from it and
2070 | * program the SMU with the correct polarity
2071 | */ |
2072 | table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)(((struct cgs_device *)hwmgr->device)->ops->read_register (hwmgr->device,0x183)) & |
2073 | (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0; |
2074 | table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY0x1; |
2075 | |
2076 | /* if required, combine VRHot/PCC with thermal out GPIO */ |
2077 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
2078 | PHM_PlatformCaps_RegulatorHot) && |
2079 | phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
2080 | PHM_PlatformCaps_CombinePCCWithThermalSignal)) |
2081 | table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT0x2; |
2082 | } else { |
2083 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
2084 | PHM_PlatformCaps_ThermalOutGPIO); |
2085 | table->ThermOutGpio = 17; |
2086 | table->ThermOutPolarity = 1; |
2087 | table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE0x0; |
2088 | } |
2089 | |
2090 | for (i = 0; i < SMU73_MAX_ENTRIES_SMIO32; i++) |
2091 | table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i])(__uint32_t)(__builtin_constant_p(table->Smio[i]) ? (__uint32_t )(((__uint32_t)(table->Smio[i]) & 0xff) << 24 | ( (__uint32_t)(table->Smio[i]) & 0xff00) << 8 | (( __uint32_t)(table->Smio[i]) & 0xff0000) >> 8 | ( (__uint32_t)(table->Smio[i]) & 0xff000000) >> 24 ) : __swap32md(table->Smio[i])); |
2092 | |
2093 | CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags)((table->SystemFlags) = (__uint32_t)(__builtin_constant_p( table->SystemFlags) ? (__uint32_t)(((__uint32_t)(table-> SystemFlags) & 0xff) << 24 | ((__uint32_t)(table-> SystemFlags) & 0xff00) << 8 | ((__uint32_t)(table-> SystemFlags) & 0xff0000) >> 8 | ((__uint32_t)(table ->SystemFlags) & 0xff000000) >> 24) : __swap32md (table->SystemFlags))); |
2094 | CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig)((table->VRConfig) = (__uint32_t)(__builtin_constant_p(table ->VRConfig) ? (__uint32_t)(((__uint32_t)(table->VRConfig ) & 0xff) << 24 | ((__uint32_t)(table->VRConfig) & 0xff00) << 8 | ((__uint32_t)(table->VRConfig) & 0xff0000) >> 8 | ((__uint32_t)(table->VRConfig ) & 0xff000000) >> 24) : __swap32md(table->VRConfig ))); |
2095 | CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1)((table->SmioMask1) = (__uint32_t)(__builtin_constant_p(table ->SmioMask1) ? (__uint32_t)(((__uint32_t)(table->SmioMask1 ) & 0xff) << 24 | ((__uint32_t)(table->SmioMask1 ) & 0xff00) << 8 | ((__uint32_t)(table->SmioMask1 ) & 0xff0000) >> 8 | ((__uint32_t)(table->SmioMask1 ) & 0xff000000) >> 24) : __swap32md(table->SmioMask1 ))); |
2096 | CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2)((table->SmioMask2) = (__uint32_t)(__builtin_constant_p(table ->SmioMask2) ? (__uint32_t)(((__uint32_t)(table->SmioMask2 ) & 0xff) << 24 | ((__uint32_t)(table->SmioMask2 ) & 0xff00) << 8 | ((__uint32_t)(table->SmioMask2 ) & 0xff0000) >> 8 | ((__uint32_t)(table->SmioMask2 ) & 0xff000000) >> 24) : __swap32md(table->SmioMask2 ))); |
2097 | CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize)((table->SclkStepSize) = (__uint32_t)(__builtin_constant_p (table->SclkStepSize) ? (__uint32_t)(((__uint32_t)(table-> SclkStepSize) & 0xff) << 24 | ((__uint32_t)(table-> SclkStepSize) & 0xff00) << 8 | ((__uint32_t)(table-> SclkStepSize) & 0xff0000) >> 8 | ((__uint32_t)(table ->SclkStepSize) & 0xff000000) >> 24) : __swap32md (table->SclkStepSize))); |
2098 | CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh)((table->TemperatureLimitHigh) = (__uint16_t)(__builtin_constant_p (table->TemperatureLimitHigh) ? (__uint16_t)(((__uint16_t) (table->TemperatureLimitHigh) & 0xffU) << 8 | (( __uint16_t)(table->TemperatureLimitHigh) & 0xff00U) >> 8) : __swap16md(table->TemperatureLimitHigh))); |
2099 | CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow)((table->TemperatureLimitLow) = (__uint16_t)(__builtin_constant_p (table->TemperatureLimitLow) ? (__uint16_t)(((__uint16_t)( table->TemperatureLimitLow) & 0xffU) << 8 | ((__uint16_t )(table->TemperatureLimitLow) & 0xff00U) >> 8) : __swap16md(table->TemperatureLimitLow))); |
2100 | CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime)((table->VoltageResponseTime) = (__uint16_t)(__builtin_constant_p (table->VoltageResponseTime) ? (__uint16_t)(((__uint16_t)( table->VoltageResponseTime) & 0xffU) << 8 | ((__uint16_t )(table->VoltageResponseTime) & 0xff00U) >> 8) : __swap16md(table->VoltageResponseTime))); |
2101 | CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime)((table->PhaseResponseTime) = (__uint16_t)(__builtin_constant_p (table->PhaseResponseTime) ? (__uint16_t)(((__uint16_t)(table ->PhaseResponseTime) & 0xffU) << 8 | ((__uint16_t )(table->PhaseResponseTime) & 0xff00U) >> 8) : __swap16md (table->PhaseResponseTime))); |
2102 | |
2103 | /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ |
2104 | result = smu7_copy_bytes_to_smc(hwmgr, |
2105 | smu_data->smu7_data.dpm_table_start + |
2106 | offsetof(SMU73_Discrete_DpmTable, SystemFlags)__builtin_offsetof(SMU73_Discrete_DpmTable, SystemFlags), |
2107 | (uint8_t *)&(table->SystemFlags), |
2108 | sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController), |
2109 | SMC_RAM_END0x40000); |
2110 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload dpm data to SMC memory!" ); return result; } } while (0) |
2111 | "Failed to upload dpm data to SMC memory!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload dpm data to SMC memory!" ); return result; } } while (0); |
2112 | |
2113 | result = fiji_init_arb_table_index(hwmgr); |
2114 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload arb data to SMC memory!" ); return result; } } while (0) |
2115 | "Failed to upload arb data to SMC memory!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload arb data to SMC memory!" ); return result; } } while (0); |
2116 | |
2117 | result = fiji_populate_pm_fuses(hwmgr); |
2118 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate PM fuses to SMC memory!" ); return result; } } while (0) |
2119 | "Failed to populate PM fuses to SMC memory!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate PM fuses to SMC memory!" ); return result; } } while (0); |
2120 | |
2121 | result = fiji_setup_dpm_led_config(hwmgr); |
2122 | PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to setup dpm led config" ); return result; } } while (0) |
2123 | "Failed to setup dpm led config", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to setup dpm led config" ); return result; } } while (0); |
2124 | |
2125 | return 0; |
2126 | } |
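One detail worth calling out in the table initialisation above: the thermal limits are stored in Q8.8 fixed point (SMU7_Q88_FORMAT_CONVERSION_UNIT expands to 256), so the high limit is the target operating temperature times 256 and the low limit sits one degree below it. A sketch of that encoding with invented helper and parameter names:

#include <stdint.h>

static void temp_limits_q88(uint16_t target_temp_c,
			    uint16_t *limit_high, uint16_t *limit_low)
{
	*limit_high = (uint16_t)(target_temp_c * 256);        /* temp in Q8.8   */
	*limit_low  = (uint16_t)((target_temp_c - 1) * 256);  /* 1 degree lower */
}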
2127 | |
2128 | static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) |
2129 | { |
2130 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
2131 | |
2132 | SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE0 }; |
2133 | uint32_t duty100; |
2134 | uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; |
2135 | uint16_t fdo_min, slope1, slope2; |
2136 | uint32_t reference_clock; |
2137 | int res; |
2138 | uint64_t tmp64; |
2139 | |
2140 | if (hwmgr->thermal_controller.fanInfo.bNoFan) { |
2141 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
2142 | PHM_PlatformCaps_MicrocodeFanControl); |
2143 | return 0; |
2144 | } |
2145 | |
2146 | if (smu_data->smu7_data.fan_table_start == 0) { |
2147 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
2148 | PHM_PlatformCaps_MicrocodeFanControl); |
2149 | return 0; |
2150 | } |
2151 | |
2152 | duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0300068))) & 0xff) >> 0x0) |
2153 | CG_FDO_CTRL1, FMAX_DUTY100)((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0300068))) & 0xff) >> 0x0); |
2154 | |
2155 | if (duty100 == 0) { |
2156 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
2157 | PHM_PlatformCaps_MicrocodeFanControl); |
2158 | return 0; |
2159 | } |
2160 | |
2161 | tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. |
2162 | usPWMMin * duty100; |
2163 | do_div(tmp64, 10000)({ uint32_t __base = (10000); uint32_t __rem = ((uint64_t)(tmp64 )) % __base; (tmp64) = ((uint64_t)(tmp64)) / __base; __rem; } ); |
2164 | fdo_min = (uint16_t)tmp64; |
2165 | |
2166 | t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - |
2167 | hwmgr->thermal_controller.advanceFanControlParameters.usTMin; |
2168 | t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - |
2169 | hwmgr->thermal_controller.advanceFanControlParameters.usTMed; |
2170 | |
2171 | pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - |
2172 | hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; |
2173 | pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - |
2174 | hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; |
2175 | |
2176 | slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); |
2177 | slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); |
2178 | |
2179 | fan_table.TempMin = cpu_to_be16((50 + hwmgr->(__uint16_t)(__builtin_constant_p((50 + hwmgr-> thermal_controller .advanceFanControlParameters.usTMin) / 100) ? (__uint16_t)((( __uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters .usTMin) / 100) & 0xffU) << 8 | ((__uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMin ) / 100) & 0xff00U) >> 8) : __swap16md((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMin) / 100 )) |
2180 | thermal_controller.advanceFanControlParameters.usTMin) / 100)(__uint16_t)(__builtin_constant_p((50 + hwmgr-> thermal_controller .advanceFanControlParameters.usTMin) / 100) ? (__uint16_t)((( __uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters .usTMin) / 100) & 0xffU) << 8 | ((__uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMin ) / 100) & 0xff00U) >> 8) : __swap16md((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMin) / 100 )); |
2181 | fan_table.TempMed = cpu_to_be16((50 + hwmgr->(__uint16_t)(__builtin_constant_p((50 + hwmgr-> thermal_controller .advanceFanControlParameters.usTMed) / 100) ? (__uint16_t)((( __uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters .usTMed) / 100) & 0xffU) << 8 | ((__uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMed ) / 100) & 0xff00U) >> 8) : __swap16md((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMed) / 100 )) |
2182 | thermal_controller.advanceFanControlParameters.usTMed) / 100)(__uint16_t)(__builtin_constant_p((50 + hwmgr-> thermal_controller .advanceFanControlParameters.usTMed) / 100) ? (__uint16_t)((( __uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters .usTMed) / 100) & 0xffU) << 8 | ((__uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMed ) / 100) & 0xff00U) >> 8) : __swap16md((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMed) / 100 )); |
2183 | fan_table.TempMax = cpu_to_be16((50 + hwmgr->(__uint16_t)(__builtin_constant_p((50 + hwmgr-> thermal_controller .advanceFanControlParameters.usTMax) / 100) ? (__uint16_t)((( __uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters .usTMax) / 100) & 0xffU) << 8 | ((__uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMax ) / 100) & 0xff00U) >> 8) : __swap16md((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMax) / 100 )) |
2184 | thermal_controller.advanceFanControlParameters.usTMax) / 100)(__uint16_t)(__builtin_constant_p((50 + hwmgr-> thermal_controller .advanceFanControlParameters.usTMax) / 100) ? (__uint16_t)((( __uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters .usTMax) / 100) & 0xffU) << 8 | ((__uint16_t)((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMax ) / 100) & 0xff00U) >> 8) : __swap16md((50 + hwmgr-> thermal_controller.advanceFanControlParameters.usTMax) / 100 )); |
2185 | |
2186 | fan_table.Slope1 = cpu_to_be16(slope1)(__uint16_t)(__builtin_constant_p(slope1) ? (__uint16_t)(((__uint16_t )(slope1) & 0xffU) << 8 | ((__uint16_t)(slope1) & 0xff00U) >> 8) : __swap16md(slope1)); |
2187 | fan_table.Slope2 = cpu_to_be16(slope2)(__uint16_t)(__builtin_constant_p(slope2) ? (__uint16_t)(((__uint16_t )(slope2) & 0xffU) << 8 | ((__uint16_t)(slope2) & 0xff00U) >> 8) : __swap16md(slope2)); |
2188 | |
2189 | fan_table.FdoMin = cpu_to_be16(fdo_min)(__uint16_t)(__builtin_constant_p(fdo_min) ? (__uint16_t)(((__uint16_t )(fdo_min) & 0xffU) << 8 | ((__uint16_t)(fdo_min) & 0xff00U) >> 8) : __swap16md(fdo_min)); |
2190 | |
2191 | fan_table.HystDown = cpu_to_be16(hwmgr->(__uint16_t)(__builtin_constant_p(hwmgr-> thermal_controller .advanceFanControlParameters.ucTHyst) ? (__uint16_t)(((__uint16_t )(hwmgr-> thermal_controller.advanceFanControlParameters.ucTHyst ) & 0xffU) << 8 | ((__uint16_t)(hwmgr-> thermal_controller .advanceFanControlParameters.ucTHyst) & 0xff00U) >> 8) : __swap16md(hwmgr-> thermal_controller.advanceFanControlParameters .ucTHyst)) |
2192 | thermal_controller.advanceFanControlParameters.ucTHyst)(__uint16_t)(__builtin_constant_p(hwmgr-> thermal_controller .advanceFanControlParameters.ucTHyst) ? (__uint16_t)(((__uint16_t )(hwmgr-> thermal_controller.advanceFanControlParameters.ucTHyst ) & 0xffU) << 8 | ((__uint16_t)(hwmgr-> thermal_controller .advanceFanControlParameters.ucTHyst) & 0xff00U) >> 8) : __swap16md(hwmgr-> thermal_controller.advanceFanControlParameters .ucTHyst)); |
2193 | |
2194 | fan_table.HystUp = cpu_to_be16(1)(__uint16_t)(__builtin_constant_p(1) ? (__uint16_t)(((__uint16_t )(1) & 0xffU) << 8 | ((__uint16_t)(1) & 0xff00U ) >> 8) : __swap16md(1)); |
2195 | |
2196 | fan_table.HystSlope = cpu_to_be16(1)(__uint16_t)(__builtin_constant_p(1) ? (__uint16_t)(((__uint16_t )(1) & 0xffU) << 8 | ((__uint16_t)(1) & 0xff00U ) >> 8) : __swap16md(1)); |
2197 | |
2198 | fan_table.TempRespLim = cpu_to_be16(5)(__uint16_t)(__builtin_constant_p(5) ? (__uint16_t)(((__uint16_t )(5) & 0xffU) << 8 | ((__uint16_t)(5) & 0xff00U ) >> 8) : __swap16md(5)); |
2199 | |
2200 | reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev)((struct amdgpu_device *)hwmgr->adev)->asic_funcs->get_xclk (((struct amdgpu_device *)hwmgr->adev)); |
2201 | |
2202 | fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
2203 | thermal_controller.advanceFanControlParameters.ulCycleDelay *
2204 | reference_clock) / 1600);
2205 | |
2206 | fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2207 | |
2208 | fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
2209 | hwmgr->device, CGS_IND_REG__SMC,
2210 | CG_MULT_THERMAL_CTRL, TEMP_SEL);
2211 | |
2212 | res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, |
2213 | (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), |
2214 | SMC_RAM_END);
2215 | |
2216 | if (!res && hwmgr->thermal_controller. |
2217 | advanceFanControlParameters.ucMinimumPWMLimit) |
2218 | res = smum_send_msg_to_smc_with_parameter(hwmgr, |
2219 | PPSMC_MSG_SetFanMinPwm,
2220 | hwmgr->thermal_controller. |
2221 | advanceFanControlParameters.ucMinimumPWMLimit, |
2222 | NULL);
2223 | |
2224 | if (!res && hwmgr->thermal_controller. |
2225 | advanceFanControlParameters.ulMinFanSCLKAcousticLimit) |
2226 | res = smum_send_msg_to_smc_with_parameter(hwmgr, |
2227 | PPSMC_MSG_SetFanSclkTarget,
2228 | hwmgr->thermal_controller. |
2229 | advanceFanControlParameters.ulMinFanSCLKAcousticLimit, |
2230 | NULL);
2231 | |
2232 | if (res) |
2233 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
2234 | PHM_PlatformCaps_MicrocodeFanControl); |
2235 | |
2236 | return 0; |
2237 | } |
2238 | |
2239 | |
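     | /* Enable AVFS in the SMC, but only on boards that report AVFS support;
     |  * the message result is not checked. */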
2240 | static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) |
2241 | { |
2242 | if (!hwmgr->avfs_supported) |
2243 | return 0; |
2244 | |
2245 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
2246 | |
2247 | return 0; |
2248 | } |
2249 | |
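     | /* Reprogram the MC arbiter / memory timing parameters only when an
     |  * overdrive SCLK or MCLK update is pending in the DPM table. */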
2250 | static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) |
2251 | { |
2252 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
2253 | |
2254 | if (data->need_update_smu7_dpm_table & |
2255 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2256 | return fiji_program_memory_timing_parameters(hwmgr); |
2257 | |
2258 | return 0; |
2259 | } |
2260 | |
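     | /* Write the low-SCLK interrupt threshold (converted to SMC byte order)
     |  * into the DPM table when SclkThrottleLowNotification is enabled, then
     |  * refresh the memory timing parameters if an overdrive change is pending. */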
2261 | static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) |
2262 | { |
2263 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
2264 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
2265 | |
2266 | int result = 0; |
2267 | uint32_t low_sclk_interrupt_threshold = 0; |
2268 | |
2269 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
2270 | PHM_PlatformCaps_SclkThrottleLowNotification) |
2271 | && (data->low_sclk_interrupt_threshold != 0)) { |
2272 | low_sclk_interrupt_threshold = |
2273 | data->low_sclk_interrupt_threshold; |
2274 | |
2275 | CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2276 | |
2277 | result = smu7_copy_bytes_to_smc( |
2278 | hwmgr, |
2279 | smu_data->smu7_data.dpm_table_start + |
2280 | offsetof(SMU73_Discrete_DpmTable,
2281 | LowSclkInterruptThreshold),
2282 | (uint8_t *)&low_sclk_interrupt_threshold, |
2283 | sizeof(uint32_t), |
2284 | SMC_RAM_END);
2285 | } |
2286 | result = fiji_program_mem_timing_parameters(hwmgr); |
2287 | PP_ASSERT_WITH_CODE((result == 0),
2288 | "Failed to program memory timing parameters!",
2289 | );
2290 | return result; |
2291 | } |
2292 | |
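     | /* Translate the generic SMU_SoftRegisters / SMU_Discrete_DpmTable member
     |  * IDs used by the common smu7 code into byte offsets within the SMU73
     |  * firmware structures; unknown members log a warning and return 0. */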
2293 | static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) |
2294 | { |
2295 | switch (type) { |
2296 | case SMU_SoftRegisters: |
2297 | switch (member) { |
2298 | case HandshakeDisables:
2299 | return offsetof(SMU73_SoftRegisters, HandshakeDisables);
2300 | case VoltageChangeTimeout:
2301 | return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
2302 | case AverageGraphicsActivity:
2303 | return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
2304 | case AverageMemoryActivity:
2305 | return offsetof(SMU73_SoftRegisters, AverageMemoryActivity);
2306 | case PreVBlankGap:
2307 | return offsetof(SMU73_SoftRegisters, PreVBlankGap);
2308 | case VBlankTimeout:
2309 | return offsetof(SMU73_SoftRegisters, VBlankTimeout);
2310 | case UcodeLoadStatus:
2311 | return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
2312 | case DRAM_LOG_ADDR_H:
2313 | return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_H);
2314 | case DRAM_LOG_ADDR_L:
2315 | return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_L);
2316 | case DRAM_LOG_PHY_ADDR_H:
2317 | return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2318 | case DRAM_LOG_PHY_ADDR_L:
2319 | return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2320 | case DRAM_LOG_BUFF_SIZE:
2321 | return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2322 | } |
2323 | break; |
2324 | case SMU_Discrete_DpmTable: |
2325 | switch (member) { |
2326 | case UvdBootLevel:
2327 | return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
2328 | case VceBootLevel:
2329 | return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
2330 | case LowSclkInterruptThreshold:
2331 | return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
2332 | } |
2333 | break; |
2334 | } |
2335 | pr_warn("can't get the offset of type %x member %x\n", type, member);
2336 | return 0; |
2337 | } |
2338 | |
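     | /* Report the SMU73 table-size limits (DPM levels, SMIO entries) for the
     |  * generic SMU_MAX_* queries. */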
2339 | static uint32_t fiji_get_mac_definition(uint32_t value) |
2340 | { |
2341 | switch (value) { |
2342 | case SMU_MAX_LEVELS_GRAPHICS: |
2343 | return SMU73_MAX_LEVELS_GRAPHICS;
2344 | case SMU_MAX_LEVELS_MEMORY:
2345 | return SMU73_MAX_LEVELS_MEMORY;
2346 | case SMU_MAX_LEVELS_LINK:
2347 | return SMU73_MAX_LEVELS_LINK;
2348 | case SMU_MAX_ENTRIES_SMIO:
2349 | return SMU73_MAX_ENTRIES_SMIO;
2350 | case SMU_MAX_LEVELS_VDDC:
2351 | return SMU73_MAX_LEVELS_VDDC;
2352 | case SMU_MAX_LEVELS_VDDGFX:
2353 | return SMU73_MAX_LEVELS_VDDGFX;
2354 | case SMU_MAX_LEVELS_VDDCI:
2355 | return SMU73_MAX_LEVELS_VDDCI;
2356 | case SMU_MAX_LEVELS_MVDD:
2357 | return SMU73_MAX_LEVELS_MVDD;
2358 | } |
2359 | |
2360 | pr_warn("can't get the mac of %x\n", value);
2361 | return 0; |
2362 | } |
2363 | |
2364 | |
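     | /* Select the highest UVD boot level from the MM dependency table, patch
     |  * it into the word holding UvdBootLevel in SMC RAM, and update the UVD
     |  * DPM enabled mask when UVD DPM is disabled or a stable p-state is set. */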
2365 | static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) |
2366 | { |
2367 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
2368 | uint32_t mm_boot_level_offset, mm_boot_level_value; |
2369 | struct phm_ppt_v1_information *table_info = |
2370 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
2371 | |
2372 | smu_data->smc_state_table.UvdBootLevel = 0; |
2373 | if (table_info->mm_dep_table->count > 0) |
2374 | smu_data->smc_state_table.UvdBootLevel = |
2375 | (uint8_t) (table_info->mm_dep_table->count - 1); |
2376 | mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
2377 | UvdBootLevel);
2378 | mm_boot_level_offset /= 4; |
2379 | mm_boot_level_offset *= 4; |
2380 | mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2381 | CGS_IND_REG__SMC, mm_boot_level_offset);
2382 | mm_boot_level_value &= 0x00FFFFFF; |
2383 | mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24; |
2384 | cgs_write_ind_register(hwmgr->device,
2385 | CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2386 | |
2387 | if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
2388 | PHM_PlatformCaps_UVDDPM) || |
2389 | phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
2390 | PHM_PlatformCaps_StablePState)) |
2391 | smum_send_msg_to_smc_with_parameter(hwmgr, |
2392 | PPSMC_MSG_UVDDPM_SetEnabledMask,
2393 | (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel), |
2394 | NULL);
2395 | return 0; |
2396 | } |
2397 | |
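     | /* Same pattern for VCE: choose the boot level (highest entry for stable
     |  * p-state, otherwise 0), patch it into SMC RAM and, for stable p-state,
     |  * update the VCE DPM enabled mask. */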
2398 | static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) |
2399 | { |
2400 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
2401 | uint32_t mm_boot_level_offset, mm_boot_level_value; |
2402 | struct phm_ppt_v1_information *table_info = |
2403 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
2404 | |
2405 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
2406 | PHM_PlatformCaps_StablePState)) |
2407 | smu_data->smc_state_table.VceBootLevel = |
2408 | (uint8_t) (table_info->mm_dep_table->count - 1); |
2409 | else |
2410 | smu_data->smc_state_table.VceBootLevel = 0; |
2411 | |
2412 | mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + |
2413 | offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
2414 | mm_boot_level_offset /= 4; |
2415 | mm_boot_level_offset *= 4; |
2416 | mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
2417 | CGS_IND_REG__SMC, mm_boot_level_offset);
2418 | mm_boot_level_value &= 0xFF00FFFF; |
2419 | mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16; |
2420 | cgs_write_ind_register(hwmgr->device,
2421 | CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
2422 | |
2423 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) |
2424 | smum_send_msg_to_smc_with_parameter(hwmgr, |
2425 | PPSMC_MSG_VCEDPM_SetEnabledMask,
2426 | (uint32_t)1 << smu_data->smc_state_table.VceBootLevel, |
2427 | NULL);
2428 | return 0; |
2429 | } |
2430 | |
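     | /* Dispatch SMC table updates by type; only the UVD and VCE tables are
     |  * handled on Fiji. */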
2431 | static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) |
2432 | { |
2433 | switch (type) { |
2434 | case SMU_UVD_TABLE: |
2435 | fiji_update_uvd_smc_table(hwmgr); |
2436 | break; |
2437 | case SMU_VCE_TABLE: |
2438 | fiji_update_vce_smc_table(hwmgr); |
2439 | break; |
2440 | default: |
2441 | break; |
2442 | } |
2443 | return 0; |
2444 | } |
2445 | |
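     | /* Read the SMU73 firmware header from SMC SRAM and cache the start
     |  * offsets of the DPM table, soft registers, MC register table, fan table
     |  * and arbiter DRAM timing table, plus the SMC microcode version. */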
2446 | static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) |
2447 | { |
2448 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
2449 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); |
2450 | uint32_t tmp; |
2451 | int result; |
2452 | bool error = false;
2453 | |
2454 | result = smu7_read_smc_sram_dword(hwmgr, |
2455 | SMU7_FIRMWARE_HEADER_LOCATION +
2456 | offsetof(SMU73_Firmware_Header, DpmTable),
2457 | &tmp, SMC_RAM_END);
2458 | |
2459 | if (0 == result) |
2460 | smu_data->smu7_data.dpm_table_start = tmp; |
2461 | |
2462 | error |= (0 != result); |
2463 | |
2464 | result = smu7_read_smc_sram_dword(hwmgr, |
2465 | SMU7_FIRMWARE_HEADER_LOCATION +
2466 | offsetof(SMU73_Firmware_Header, SoftRegisters),
2467 | &tmp, SMC_RAM_END);
2468 | |
2469 | if (!result) { |
2470 | data->soft_regs_start = tmp; |
2471 | smu_data->smu7_data.soft_regs_start = tmp; |
2472 | } |
2473 | |
2474 | error |= (0 != result); |
2475 | |
2476 | result = smu7_read_smc_sram_dword(hwmgr, |
2477 | SMU7_FIRMWARE_HEADER_LOCATION +
2478 | offsetof(SMU73_Firmware_Header, mcRegisterTable),
2479 | &tmp, SMC_RAM_END);
2480 | |
2481 | if (!result) |
2482 | smu_data->smu7_data.mc_reg_table_start = tmp; |
2483 | |
2484 | result = smu7_read_smc_sram_dword(hwmgr, |
2485 | SMU7_FIRMWARE_HEADER_LOCATION +
2486 | offsetof(SMU73_Firmware_Header, FanTable),
2487 | &tmp, SMC_RAM_END);
2488 | |
2489 | if (!result) |
2490 | smu_data->smu7_data.fan_table_start = tmp; |
2491 | |
2492 | error |= (0 != result); |
2493 | |
2494 | result = smu7_read_smc_sram_dword(hwmgr, |
2495 | SMU7_FIRMWARE_HEADER_LOCATION +
2496 | offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
2497 | &tmp, SMC_RAM_END);
2498 | |
2499 | if (!result) |
2500 | smu_data->smu7_data.arb_table_start = tmp; |
2501 | |
2502 | error |= (0 != result); |
2503 | |
2504 | result = smu7_read_smc_sram_dword(hwmgr, |
2505 | SMU7_FIRMWARE_HEADER_LOCATION +
2506 | offsetof(SMU73_Firmware_Header, Version),
2507 | &tmp, SMC_RAM_END);
2508 | |
2509 | if (!result) |
2510 | hwmgr->microcode_version_info.SMC = tmp; |
2511 | |
2512 | error |= (0 != result); |
2513 | |
2514 | return error ? -1 : 0; |
2515 | } |
2516 | |
2517 | static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) |
2518 | { |
2519 | |
2520 | /* Program additional LP registers |
2521 | * that are no longer programmed by VBIOS |
2522 | */ |
2523 | cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
2524 | cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2525 | cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
2526 | cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2527 | cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
2528 | cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2529 | cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
2530 | cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2531 | cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
2532 | cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2533 | cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
2534 | cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2535 | cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
2536 | cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2537 | |
2538 | return 0; |
2539 | } |
2540 | |
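     | /* DPM is considered running once the SMC reports VOLTAGE_CONTROLLER_ON
     |  * in its FEATURE_STATUS register. */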
2541 | static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
2542 | { |
2543 | return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
2544 | CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
2545 | ? true : false;
2546 | } |
2547 | |
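     | /* Apply a power-profile setting: freeze SCLK/MCLK DPM, rewrite the
     |  * ActivityLevel and Up/Down hysteresis fields of each graphics and memory
     |  * level in place in SMC RAM (read-modify-write on dword-aligned offsets),
     |  * then unfreeze the levels again. */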
2548 | static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr, |
2549 | void *profile_setting) |
2550 | { |
2551 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
2552 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *) |
2553 | (hwmgr->smu_backend); |
2554 | struct profile_mode_setting *setting; |
2555 | struct SMU73_Discrete_GraphicsLevel *levels = |
2556 | smu_data->smc_state_table.GraphicsLevel; |
2557 | uint32_t array = smu_data->smu7_data.dpm_table_start + |
2558 | offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
2559 | |
2560 | uint32_t mclk_array = smu_data->smu7_data.dpm_table_start + |
2561 | offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
2562 | struct SMU73_Discrete_MemoryLevel *mclk_levels = |
2563 | smu_data->smc_state_table.MemoryLevel; |
2564 | uint32_t i; |
2565 | uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp; |
2566 | |
2567 | if (profile_setting == NULL)
2568 | return -EINVAL;
2569 | |
2570 | setting = (struct profile_mode_setting *)profile_setting; |
2571 | |
2572 | if (setting->bupdate_sclk) { |
2573 | if (!data->sclk_dpm_key_disabled) |
2574 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
2575 | for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { |
2576 | if (levels[i].ActivityLevel != |
2577 | cpu_to_be16(setting->sclk_activity)) {
2578 | levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
2579 |
2580 | clk_activity_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i)
2581 | + offsetof(SMU73_Discrete_GraphicsLevel, ActivityLevel);
2582 | offset = clk_activity_offset & ~0x3;
2583 | tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2584 | tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
2585 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2586 | |
2587 | } |
2588 | if (levels[i].UpHyst != setting->sclk_up_hyst || |
2589 | levels[i].DownHyst != setting->sclk_down_hyst) { |
2590 | levels[i].UpHyst = setting->sclk_up_hyst; |
2591 | levels[i].DownHyst = setting->sclk_down_hyst; |
2592 | up_hyst_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i) |
2593 | + offsetof(SMU73_Discrete_GraphicsLevel, UpHyst);
2594 | down_hyst_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i)
2595 | + offsetof(SMU73_Discrete_GraphicsLevel, DownHyst);
2596 | offset = up_hyst_offset & ~0x3;
2597 | tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2598 | tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpHyst, sizeof(uint8_t));
2599 | tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownHyst, sizeof(uint8_t));
2600 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2601 | } |
2602 | } |
2603 | if (!data->sclk_dpm_key_disabled) |
2604 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
2605 | } |
2606 | |
2607 | if (setting->bupdate_mclk) { |
2608 | if (!data->mclk_dpm_key_disabled) |
2609 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
2610 | for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) { |
2611 | if (mclk_levels[i].ActivityLevel != |
2612 | cpu_to_be16(setting->mclk_activity)) {
2613 | mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
2614 |
2615 | clk_activity_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i)
2616 | + offsetof(SMU73_Discrete_MemoryLevel, ActivityLevel);
2617 | offset = clk_activity_offset & ~0x3;
2618 | tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2619 | tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
2620 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2621 | |
2622 | } |
2623 | if (mclk_levels[i].UpHyst != setting->mclk_up_hyst || |
2624 | mclk_levels[i].DownHyst != setting->mclk_down_hyst) { |
2625 | mclk_levels[i].UpHyst = setting->mclk_up_hyst; |
2626 | mclk_levels[i].DownHyst = setting->mclk_down_hyst; |
2627 | up_hyst_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i) |
2628 | + offsetof(SMU73_Discrete_MemoryLevel, UpHyst);
2629 | down_hyst_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i)
2630 | + offsetof(SMU73_Discrete_MemoryLevel, DownHyst);
2631 | offset = up_hyst_offset & ~0x3;
2632 | tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2633 | tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpHyst, sizeof(uint8_t));
2634 | tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownHyst, sizeof(uint8_t));
2635 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2636 | } |
2637 | } |
2638 | if (!data->mclk_dpm_key_disabled) |
2639 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
2640 | } |
2641 | return 0; |
2642 | } |
2643 | |
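     | /* smumgr callbacks for Fiji: the generic smu7 helpers handle messaging
     |  * and firmware loading, while the fiji_* routines above populate the
     |  * chip-specific SMU73 tables. */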
2644 | const struct pp_smumgr_func fiji_smu_funcs = { |
2645 | .name = "fiji_smu", |
2646 | .smu_init = &fiji_smu_init, |
2647 | .smu_fini = &smu7_smu_fini, |
2648 | .start_smu = &fiji_start_smu, |
2649 | .check_fw_load_finish = &smu7_check_fw_load_finish, |
2650 | .request_smu_load_fw = &smu7_reload_firmware, |
2651 | .request_smu_load_specific_fw = NULL,
2652 | .send_msg_to_smc = &smu7_send_msg_to_smc, |
2653 | .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, |
2654 | .get_argument = smu7_get_argument, |
2655 | .download_pptable_settings = NULL,
2656 | .upload_pptable_settings = NULL,
2657 | .update_smc_table = fiji_update_smc_table, |
2658 | .get_offsetof = fiji_get_offsetof, |
2659 | .process_firmware_header = fiji_process_firmware_header, |
2660 | .init_smc_table = fiji_init_smc_table, |
2661 | .update_sclk_threshold = fiji_update_sclk_threshold, |
2662 | .thermal_setup_fan_table = fiji_thermal_setup_fan_table, |
2663 | .thermal_avfs_enable = fiji_thermal_avfs_enable, |
2664 | .populate_all_graphic_levels = fiji_populate_all_graphic_levels, |
2665 | .populate_all_memory_levels = fiji_populate_all_memory_levels, |
2666 | .get_mac_definition = fiji_get_mac_definition, |
2667 | .initialize_mc_reg_table = fiji_initialize_mc_reg_table, |
2668 | .is_dpm_running = fiji_is_dpm_running, |
2669 | .is_hw_avfs_present = fiji_is_hw_avfs_present, |
2670 | .update_dpm_settings = fiji_update_dpm_settings, |
2671 | }; |