| File: | dev/pci/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c | 
| Warning: | line 1615, column 2: Value stored to 'result' is never read | 
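The warning above is a dead-store diagnostic: a value is assigned to `result` but never read before it is overwritten or the function returns. The flagged line (1615) lies outside the excerpt reproduced below; the following minimal sketch uses hypothetical names and only illustrates the pattern the checker matches, it is not the flagged code.

```c
/* Illustrative only -- hypothetical code, not the flagged line 1615. */
static int do_step(void)
{
	return 0;
}

static int example(void)
{
	int result = 0;

	result = do_step();	/* value stored to 'result' here ... */
	return 0;		/* ... is never read afterwards: a dead store */
}
```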
| 1 | /* | 
| 2 | * Copyright 2015 Advanced Micro Devices, Inc. | 
| 3 | * | 
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | 
| 5 | * copy of this software and associated documentation files (the "Software"), | 
| 6 | * to deal in the Software without restriction, including without limitation | 
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | 
| 9 | * Software is furnished to do so, subject to the following conditions: | 
| 10 | * | 
| 11 | * The above copyright notice and this permission notice shall be included in | 
| 12 | * all copies or substantial portions of the Software. | 
| 13 | * | 
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | 
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | 
| 21 | * | 
| 22 | */ | 
| 23 | |
| 24 | #include "pp_debug.h" | 
| 25 | #include "smumgr.h" | 
| 26 | #include "smu7_dyn_defaults.h" | 
| 27 | #include "smu73.h" | 
| 28 | #include "smu_ucode_xfer_vi.h" | 
| 29 | #include "fiji_smumgr.h" | 
| 30 | #include "fiji_ppsmc.h" | 
| 31 | #include "smu73_discrete.h" | 
| 32 | #include "ppatomctrl.h" | 
| 33 | #include "smu/smu_7_1_3_d.h" | 
| 34 | #include "smu/smu_7_1_3_sh_mask.h" | 
| 35 | #include "gmc/gmc_8_1_d.h" | 
| 36 | #include "gmc/gmc_8_1_sh_mask.h" | 
| 37 | #include "oss/oss_3_0_d.h" | 
| 38 | #include "gca/gfx_8_0_d.h" | 
| 39 | #include "bif/bif_5_0_d.h" | 
| 40 | #include "bif/bif_5_0_sh_mask.h" | 
| 41 | #include "dce/dce_10_0_d.h" | 
| 42 | #include "dce/dce_10_0_sh_mask.h" | 
| 43 | #include "hardwaremanager.h" | 
| 44 | #include "cgs_common.h" | 
| 45 | #include "atombios.h" | 
| 46 | #include "pppcielanes.h" | 
| 47 | #include "hwmgr.h" | 
| 48 | #include "smu7_hwmgr.h" | 
| 49 | |
| 50 | |
| 51 | #define AVFS_EN_MSB 1568 | 
| 52 | #define AVFS_EN_LSB 1568 | 
| 53 | |
| 54 | #define FIJI_SMC_SIZE 0x20000 | 
| 55 | |
| 56 | #define POWERTUNE_DEFAULT_SET_MAX 1 | 
| 57 | #define VDDC_VDDCI_DELTA 300 | 
| 58 | #define MC_CG_ARB_FREQ_F1 0x0b | 
| 59 | |
| 60 | /* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs | 
| 61 | * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] | 
| 62 | */ | 
| 63 | static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = { | 
| 64 | {600, 1050, 3, 0}, {600, 1050, 6, 1} }; | 
| 65 | |
| 66 | /* [FF, SS] type, [] 4 voltage ranges, and | 
| 67 | * [Floor Freq, Boundary Freq, VID min , VID max] | 
| 68 | */ | 
| 69 | static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = { | 
| 70 | { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, | 
| 71 | { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; | 
| 72 | |
| 73 | /* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] | 
| 74 | * (coming from PWR_CKS_CNTL.stretch_amount reg spec) | 
| 75 | */ | 
| 76 | static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = { | 
| 77 | {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} }; | 
| 78 | |
| 79 | static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { | 
| 80 | /*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */ | 
| 81 | {1, 0xF, 0xFD, | 
| 82 | /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */ | 
| 83 | 0x19, 5, 45} | 
| 84 | }; | 
| 85 | |
| 86 | static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = { | 
| 87 | /* Min Sclk pcie DeepSleep Activity CgSpll CgSpll spllSpread SpllSpread CcPwr CcPwr Sclk Display Enabled Enabled Voltage Power */ | 
| 88 | /* Voltage, Frequency, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, Spectrum, Spectrum2, DynRm, DynRm1 Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ | 
| 89 | { 0x3c0fd047, 0x30750000, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0x21680000, 0x0c000000, 0, 0, 0x16, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, | 
| 90 | { 0xa00fd047, 0x409c0000, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0x21680000, 0x11000000, 0, 0, 0x16, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, | 
| 91 | { 0x0410d047, 0x50c30000, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0x21680000, 0x0d000000, 0, 0, 0x0e, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, | 
| 92 | { 0x6810d047, 0x60ea0000, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0x21680000, 0x0e000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, | 
| 93 | { 0xcc10d047, 0xe8fd0000, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0x21680000, 0x0f000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, | 
| 94 | { 0x3011d047, 0x70110100, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0x21680000, 0x10000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, | 
| 95 | { 0x9411d047, 0xf8240100, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0x21680000, 0x11000000, 0, 0, 0x0c, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }, | 
| 96 | { 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 } | 
| 97 | }; | 
| 98 | |
| 99 | static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) | 
| 100 | { | 
| 101 | int result = 0; | 
| 102 | |
| 103 | /* Wait for smc boot up */ | 
| 104 | /* PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, | 
| 105 | RCU_UC_EVENTS, boot_seq_done, 0); */ | 
| 106 | |
| 107 | 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | 
| 108 | 			SMC_SYSCON_RESET_CNTL, rst_reg, 1); | 
| 109 | |
| 110 | result = smu7_upload_smu_firmware_image(hwmgr); | 
| 111 | if (result) | 
| 112 | return result; | 
| 113 | |
| 114 | /* Clear status */ | 
| 115 | 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | 
| 116 | 			ixSMU_STATUS, 0); | 
| 117 | |
| 118 | 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | 
| 119 | 			SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); | 
| 120 | |
| 121 | /* De-assert reset */ | 
| 122 | 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | 
| 123 | 			SMC_SYSCON_RESET_CNTL, rst_reg, 0); | 
| 124 | |
| 125 | /* Wait for ROM firmware to initialize interrupt handler */ | 
| 126 | /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, SMC_IND, | 
| 127 | SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */ | 
| 128 | |
| 129 | /* Set SMU Auto Start */ | 
| 130 | 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | 
| 131 | 			SMU_INPUT_DATA, AUTO_START, 1); | 
| 132 | |
| 133 | /* Clear firmware interrupt enable flag */ | 
| 134 | 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | 
| 135 | 			ixFIRMWARE_FLAGS, 0); | 
| 136 | |
| 137 | 	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, | 
| 138 | 			INTERRUPTS_ENABLED, 1); | 
| 139 | |
| 140 | smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL); | 
| 141 | |
| 142 | /* Wait for done bit to be set */ | 
| 143 | 	PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, | 
| 144 | 			SMU_STATUS, SMU_DONE, 0); | 
| 145 | |
| 146 | /* Check pass/failed indicator */ | 
| 147 | 	if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | 
| 148 | 			SMU_STATUS, SMU_PASS) != 1) { | 
| 149 | 		PP_ASSERT_WITH_CODE(false, | 
| 150 | 				"SMU Firmware start failed!", return -1); | 
| 151 | } | 
| 152 | |
| 153 | /* Wait for firmware to initialize */ | 
| 154 | 	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, | 
| 155 | 			FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); | 
| 156 | |
| 157 | return result; | 
| 158 | } | 
| 159 | |
| 160 | static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) | 
| 161 | { | 
| 162 | int result = 0; | 
| 163 | |
| 164 | /* wait for smc boot up */ | 
| 165 | 	PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,phm_wait_for_indirect_register_unequal(hwmgr, 0x1AC, 0xc0000004 , (0) << 0x7, 0x80)  | 
| 166 | 			RCU_UC_EVENTS, boot_seq_done, 0)phm_wait_for_indirect_register_unequal(hwmgr, 0x1AC, 0xc0000004 , (0) << 0x7, 0x80);  | 
| 167 | |
| 168 | /* Clear firmware interrupt enable flag */ | 
| 169 | 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0x3f000,0))  | 
| 170 | 			ixFIRMWARE_FLAGS, 0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0x3f000,0));  | 
| 171 | |
| 172 | /* Assert reset */ | 
| 173 | 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0x80000000,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0x80000000))) & ~0x1) | (0x1 & ((1) << 0x0)))))  | 
| 174 | 			SMC_SYSCON_RESET_CNTL, rst_reg, 1)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0x80000000,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0x80000000))) & ~0x1) | (0x1 & ((1) << 0x0)))));  | 
| 175 | |
| 176 | result = smu7_upload_smu_firmware_image(hwmgr); | 
| 177 | if (result) | 
| 178 | return result; | 
| 179 | |
| 180 | /* Set smc instruct start point at 0x0 */ | 
| 181 | smu7_program_jump_on_start(hwmgr); | 
| 182 | |
| 183 | /* Enable clock */ | 
| 184 | 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | 
| 185 | 			SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); | 
| 186 | |
| 187 | /* De-assert reset */ | 
| 188 | 	PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | 
| 189 | 			SMC_SYSCON_RESET_CNTL, rst_reg, 0); | 
| 190 | |
| 191 | /* Wait for firmware to initialize */ | 
| 192 | 	PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, | 
| 193 | 			FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); | 
| 194 | |
| 195 | return result; | 
| 196 | } | 
| 197 | |
| 198 | static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr) | 
| 199 | { | 
| 200 | int result = 0; | 
| 201 | struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); | 
| 202 | |
| 203 | if (0 != smu_data->avfs_btc_param) { | 
| 204 | if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, | 
| 205 | PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param, | 
| 206 | NULL)) { | 
| 207 | pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed"); | 
| 208 | result = -EINVAL; | 
| 209 | } | 
| 210 | } | 
| 211 | /* Soft-Reset to reset the engine before loading uCode */ | 
| 212 | /* halt */ | 
| 213 | 	cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000); | 
| 214 | /* reset everything */ | 
| 215 | 	cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff); | 
| 216 | /* clear reset */ | 
| 217 | 	cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0); | 
| 218 | |
| 219 | return result; | 
| 220 | } | 
| 221 | |
| 222 | static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr) | 
| 223 | { | 
| 224 | int32_t vr_config; | 
| 225 | uint32_t table_start; | 
| 226 | uint32_t level_addr, vr_config_addr; | 
| 227 | uint32_t level_size = sizeof(avfs_graphics_level); | 
| 228 | |
| 229 | 	PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr, | 
| 230 | 			SMU7_FIRMWARE_HEADER_LOCATION + | 
| 231 | 			offsetof(SMU73_Firmware_Header, DpmTable), | 
| 232 | 			&table_start, 0x40000), | 
| 233 | 			"[AVFS][Fiji_SetupGfxLvlStruct] SMU could not " | 
| 234 | 			"communicate starting address of DPM table", | 
| 235 | 			return -1;); | 
| 236 | |
| 237 | /* Default value for vr_config = | 
| 238 | * VR_MERGED_WITH_VDDC + VR_STATIC_VOLTAGE(VDDCI) */ | 
| 239 | vr_config = 0x01000500; /* Real value:0x50001 */ | 
| 240 | |
| 241 | vr_config_addr = table_start + | 
| 242 | offsetof(SMU73_Discrete_DpmTable, VRConfig); | 
| 243 | |
| 244 | 	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_addr, | 
| 245 | 			(uint8_t *)&vr_config, sizeof(int32_t), 0x40000), | 
| 246 | 			"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying " | 
| 247 | 			"vr_config value over to SMC", | 
| 248 | 			return -1;); | 
| 249 | |
| 250 | level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); | 
| 251 | |
| 252 | 	PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, level_addr, | 
| 253 | 			(uint8_t *)(&avfs_graphics_level), level_size, 0x40000), | 
| 254 | 			"[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!", | 
| 255 | 			return -1;); | 
| 256 | |
| 257 | return 0; | 
| 258 | } | 
| 259 | |
| 260 | static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr) | 
| 261 | { | 
| 262 | if (!hwmgr->avfs_supported) | 
| 263 | return 0; | 
| 264 | |
| 265 | 	PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr), | 
| 266 | 			"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level" | 
| 267 | 			" table over to SMU", | 
| 268 | 			return -EINVAL); | 
| 269 | 	PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr), | 
| 270 | 			"[AVFS][fiji_avfs_event_mgr] Could not setup " | 
| 271 | 			"Pwr Virus for AVFS ", | 
| 272 | 			return -EINVAL); | 
| 273 | 	PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr), | 
| 274 | 			"[AVFS][fiji_avfs_event_mgr] Failure at " | 
| 275 | 			"fiji_start_avfs_btc. AVFS Disabled", | 
| 276 | 			return -EINVAL); | 
| 277 | |
| 278 | return 0; | 
| 279 | } | 
| 280 | |
| 281 | static int fiji_start_smu(struct pp_hwmgr *hwmgr) | 
| 282 | { | 
| 283 | int result = 0; | 
| 284 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 285 | |
| 286 | /* Only start SMC if SMC RAM is not running */ | 
| 287 | if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) { | 
| 288 | /* Check if SMU is running in protected mode */ | 
| 289 | 		if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, | 
| 290 | 				CGS_IND_REG__SMC, | 
| 291 | 				SMU_FIRMWARE, SMU_MODE)) { | 
| 292 | result = fiji_start_smu_in_non_protection_mode(hwmgr); | 
| 293 | if (result) | 
| 294 | return result; | 
| 295 | } else { | 
| 296 | result = fiji_start_smu_in_protection_mode(hwmgr); | 
| 297 | if (result) | 
| 298 | return result; | 
| 299 | } | 
| 300 | if (fiji_avfs_event_mgr(hwmgr)) | 
| 301 | hwmgr->avfs_supported = false; | 
| 302 | } | 
| 303 | |
| 304 | /* Setup SoftRegsStart here for register lookup in case | 
| 305 | * DummyBackEnd is used and ProcessFirmwareHeader is not executed | 
| 306 | */ | 
| 307 | smu7_read_smc_sram_dword(hwmgr, | 
| 308 | SMU7_FIRMWARE_HEADER_LOCATION + | 
| 309 | offsetof(SMU73_Firmware_Header, SoftRegisters), | 
| 310 | &(priv->smu7_data.soft_regs_start), 0x40000); | 
| 311 | |
| 312 | result = smu7_request_smu_load_fw(hwmgr); | 
| 313 | |
| 314 | return result; | 
| 315 | } | 
| 316 | |
| 317 | static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr) | 
| 318 | { | 
| 319 | |
| 320 | uint32_t efuse = 0; | 
| 321 | uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1; | 
| 322 | |
| 323 | if (!hwmgr->not_vf) | 
| 324 | return false; | 
| 325 | |
| 326 | if (!atomctrl_read_efuse(hwmgr, AVFS_EN_LSB, AVFS_EN_MSB, | 
| 327 | mask, &efuse)) { | 
| 328 | if (efuse) | 
| 329 | return true; | 
| 330 | } | 
| 331 | return false; | 
| 332 | } | 
| 333 | |
| 334 | static int fiji_smu_init(struct pp_hwmgr *hwmgr) | 
| 335 | { | 
| 336 | struct fiji_smumgr *fiji_priv = NULL; | 
| 337 | |
| 338 | fiji_priv = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL); | 
| 339 | |
| 340 | if (fiji_priv == NULL) | 
| 341 | return -ENOMEM; | 
| 342 | |
| 343 | hwmgr->smu_backend = fiji_priv; | 
| 344 | |
| 345 | if (smu7_init(hwmgr)) { | 
| 346 | kfree(fiji_priv); | 
| 347 | return -EINVAL; | 
| 348 | } | 
| 349 | |
| 350 | return 0; | 
| 351 | } | 
| 352 | |
| 353 | static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, | 
| 354 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_table, | 
| 355 | uint32_t clock, uint32_t *voltage, uint32_t *mvdd) | 
| 356 | { | 
| 357 | uint32_t i; | 
| 358 | uint16_t vddci; | 
| 359 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 360 | *voltage = *mvdd = 0; | 
| 361 | |
| 362 | |
| 363 | /* clock - voltage dependency table is empty table */ | 
| 364 | if (dep_table->count == 0) | 
| 365 | return -EINVAL; | 
| 366 | |
| 367 | for (i = 0; i < dep_table->count; i++) { | 
| 368 | /* find first sclk bigger than request */ | 
| 369 | if (dep_table->entries[i].clk >= clock) { | 
| 370 | *voltage |= (dep_table->entries[i].vddc * | 
| 371 | VOLTAGE_SCALE) << VDDC_SHIFT; | 
| 372 | if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) | 
| 373 | *voltage |= (data->vbios_boot_state.vddci_bootup_value * | 
| 374 | VOLTAGE_SCALE) << VDDCI_SHIFT; | 
| 375 | else if (dep_table->entries[i].vddci) | 
| 376 | *voltage |= (dep_table->entries[i].vddci * | 
| 377 | VOLTAGE_SCALE) << VDDCI_SHIFT; | 
| 378 | else { | 
| 379 | vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), | 
| 380 | (dep_table->entries[i].vddc - | 
| 381 | VDDC_VDDCI_DELTA)); | 
| 382 | *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; | 
| 383 | } | 
| 384 | |
| 385 | if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) | 
| 386 | *mvdd = data->vbios_boot_state.mvdd_bootup_value * | 
| 387 | VOLTAGE_SCALE; | 
| 388 | else if (dep_table->entries[i].mvdd) | 
| 389 | *mvdd = (uint32_t) dep_table->entries[i].mvdd * | 
| 390 | VOLTAGE_SCALE; | 
| 391 | |
| 392 | *voltage |= 1 << PHASES_SHIFT; | 
| 393 | return 0; | 
| 394 | } | 
| 395 | } | 
| 396 | |
| 397 | /* sclk is bigger than max sclk in the dependence table */ | 
| 398 | *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; | 
| 399 | |
| 400 | if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) | 
| 401 | *voltage |= (data->vbios_boot_state.vddci_bootup_value * | 
| 402 | VOLTAGE_SCALE) << VDDCI_SHIFT; | 
| 403 | else if (dep_table->entries[i-1].vddci) { | 
| 404 | vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), | 
| 405 | (dep_table->entries[i].vddc - | 
| 406 | VDDC_VDDCI_DELTA)); | 
| 407 | *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; | 
| 408 | } | 
| 409 | |
| 410 | if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) | 
| 411 | *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; | 
| 412 | else if (dep_table->entries[i].mvdd) | 
| 413 | *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; | 
| 414 | |
| 415 | return 0; | 
| 416 | } | 
| 417 | |
| 418 | |
| 419 | static uint16_t scale_fan_gain_settings(uint16_t raw_setting) | 
| 420 | { | 
| 421 | uint32_t tmp; | 
| 422 | tmp = raw_setting * 4096 / 100; | 
| 423 | return (uint16_t)tmp; | 
| 424 | } | 
| 425 | |
| 426 | static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda) | 
| 427 | { | 
| 428 | switch (line) { | 
| 429 | case SMU7_I2CLineID_DDC1: | 
| 430 | *scl = SMU7_I2C_DDC1CLK; | 
| 431 | *sda = SMU7_I2C_DDC1DATA; | 
| 432 | break; | 
| 433 | case SMU7_I2CLineID_DDC2: | 
| 434 | *scl = SMU7_I2C_DDC2CLK; | 
| 435 | *sda = SMU7_I2C_DDC2DATA; | 
| 436 | break; | 
| 437 | case SMU7_I2CLineID_DDC3: | 
| 438 | *scl = SMU7_I2C_DDC3CLK; | 
| 439 | *sda = SMU7_I2C_DDC3DATA; | 
| 440 | break; | 
| 441 | case SMU7_I2CLineID_DDC4: | 
| 442 | *scl = SMU7_I2C_DDC4CLK; | 
| 443 | *sda = SMU7_I2C_DDC4DATA; | 
| 444 | break; | 
| 445 | case SMU7_I2CLineID_DDC5: | 
| 446 | *scl = SMU7_I2C_DDC5CLK; | 
| 447 | *sda = SMU7_I2C_DDC5DATA; | 
| 448 | break; | 
| 449 | case SMU7_I2CLineID_DDC6: | 
| 450 | *scl = SMU7_I2C_DDC6CLK; | 
| 451 | *sda = SMU7_I2C_DDC6DATA; | 
| 452 | break; | 
| 453 | case SMU7_I2CLineID_SCLSDA: | 
| 454 | *scl = SMU7_I2C_SCL; | 
| 455 | *sda = SMU7_I2C_SDA; | 
| 456 | break; | 
| 457 | case SMU7_I2CLineID_DDCVGA: | 
| 458 | *scl = SMU7_I2C_DDCVGACLK; | 
| 459 | *sda = SMU7_I2C_DDCVGADATA; | 
| 460 | break; | 
| 461 | default: | 
| 462 | *scl = 0; | 
| 463 | *sda = 0; | 
| 464 | break; | 
| 465 | } | 
| 466 | } | 
| 467 | |
| 468 | static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) | 
| 469 | { | 
| 470 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 471 | struct phm_ppt_v1_information *table_info = | 
| 472 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 473 | |
| 474 | if (table_info && | 
| 475 | table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && | 
| 476 | table_info->cac_dtp_table->usPowerTuneDataSetID) | 
| 477 | smu_data->power_tune_defaults = | 
| 478 | &fiji_power_tune_data_set_array | 
| 479 | [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; | 
| 480 | else | 
| 481 | smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0]; | 
| 482 | |
| 483 | } | 
| 484 | |
| 485 | static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) | 
| 486 | { | 
| 487 | |
| 488 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 489 | const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; | 
| 490 | |
| 491 | SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); | 
| 492 | |
| 493 | struct phm_ppt_v1_information *table_info = | 
| 494 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 495 | struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; | 
| 496 | struct pp_advance_fan_control_parameters *fan_table = | 
| 497 | &hwmgr->thermal_controller.advanceFanControlParameters; | 
| 498 | uint8_t uc_scl, uc_sda; | 
| 499 | |
| 500 | /* TDP number of fraction bits are changed from 8 to 7 for Fiji | 
| 501 | * as requested by SMC team | 
| 502 | */ | 
| 503 | 	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( | 
| 504 | 			(uint16_t)(cac_dtp_table->usTDP * 128)); | 
| 505 | 	dpm_table->TargetTdp = PP_HOST_TO_SMC_US( | 
| 506 | 			(uint16_t)(cac_dtp_table->usTDP * 128)); | 
| 507 | |
| 508 | 	PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, | 
| 509 | 			"Target Operating Temp is out of Range!", | 
| 510 | 			); | 
| 511 | |
| 512 | dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); | 
| 513 | dpm_table->GpuTjHyst = 8; | 
| 514 | |
| 515 | dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase; | 
| 516 | |
| 517 | /* The following are for new Fiji Multi-input fan/thermal control */ | 
| 518 | 	dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( | 
| 519 | 			cac_dtp_table->usTargetOperatingTemp * 256); | 
| 520 | 	dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( | 
| 521 | 			cac_dtp_table->usTemperatureLimitHotspot * 256); | 
| 522 | 	dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US( | 
| 523 | 			cac_dtp_table->usTemperatureLimitLiquid1 * 256); | 
| 524 | 	dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US( | 
| 525 | 			cac_dtp_table->usTemperatureLimitLiquid2 * 256); | 
| 526 | 	dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US( | 
| 527 | 			cac_dtp_table->usTemperatureLimitVrVddc * 256); | 
| 528 | 	dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US( | 
| 529 | 			cac_dtp_table->usTemperatureLimitVrMvdd * 256); | 
| 530 | 	dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US( | 
| 531 | 			cac_dtp_table->usTemperatureLimitPlx * 256); | 
| 532 | |
| 533 | 	dpm_table->FanGainEdge = PP_HOST_TO_SMC_US( | 
| 534 | 			scale_fan_gain_settings(fan_table->usFanGainEdge)); | 
| 535 | 	dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US( | 
| 536 | 			scale_fan_gain_settings(fan_table->usFanGainHotspot)); | 
| 537 | 	dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US( | 
| 538 | 			scale_fan_gain_settings(fan_table->usFanGainLiquid)); | 
| 539 | 	dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US( | 
| 540 | 			scale_fan_gain_settings(fan_table->usFanGainVrVddc)); | 
| 541 | 	dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US( | 
| 542 | 			scale_fan_gain_settings(fan_table->usFanGainVrMvdd)); | 
| 543 | 	dpm_table->FanGainPlx = PP_HOST_TO_SMC_US( | 
| 544 | 			scale_fan_gain_settings(fan_table->usFanGainPlx)); | 
| 545 | 	dpm_table->FanGainHbm = PP_HOST_TO_SMC_US( | 
| 546 | 			scale_fan_gain_settings(fan_table->usFanGainHbm)); | 
| 547 | |
| 548 | dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address; | 
| 549 | dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address; | 
| 550 | dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address; | 
| 551 | dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address; | 
| 552 | |
| 553 | get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda); | 
| 554 | dpm_table->Liquid_I2C_LineSCL = uc_scl; | 
| 555 | dpm_table->Liquid_I2C_LineSDA = uc_sda; | 
| 556 | |
| 557 | get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda); | 
| 558 | dpm_table->Vr_I2C_LineSCL = uc_scl; | 
| 559 | dpm_table->Vr_I2C_LineSDA = uc_sda; | 
| 560 | |
| 561 | get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda); | 
| 562 | dpm_table->Plx_I2C_LineSCL = uc_scl; | 
| 563 | dpm_table->Plx_I2C_LineSDA = uc_sda; | 
| 564 | |
| 565 | return 0; | 
| 566 | } | 
| 567 | |
| 568 | |
| 569 | static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) | 
| 570 | { | 
| 571 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 572 | const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; | 
| 573 | |
| 574 | smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; | 
| 575 | smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; | 
| 576 | smu_data->power_tune_table.SviLoadLineTrimVddC = 3; | 
| 577 | smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; | 
| 578 | |
| 579 | return 0; | 
| 580 | } | 
| 581 | |
| 582 | |
| 583 | static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) | 
| 584 | { | 
| 585 | uint16_t tdc_limit; | 
| 586 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 587 | struct phm_ppt_v1_information *table_info = | 
| 588 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 589 | const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; | 
| 590 | |
| 591 | /* TDC number of fraction bits are changed from 8 to 7 | 
| 592 | * for Fiji as requested by SMC team | 
| 593 | */ | 
| 594 | tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); | 
| 595 | smu_data->power_tune_table.TDC_VDDC_PkgLimit = | 
| 596 | 			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); | 
| 597 | smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = | 
| 598 | defaults->TDC_VDDC_ThrottleReleaseLimitPerc; | 
| 599 | smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; | 
| 600 | |
| 601 | return 0; | 
| 602 | } | 
| 603 | |
| 604 | static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) | 
| 605 | { | 
| 606 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 607 | const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; | 
| 608 | uint32_t temp; | 
| 609 | |
| 610 | if (smu7_read_smc_sram_dword(hwmgr, | 
| 611 | fuse_table_offset + | 
| 612 | offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl)__builtin_offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), | 
| 613 | (uint32_t *)&temp, SMC_RAM_END)) | 
| 614 | 		PP_ASSERT_WITH_CODE(false, | 
| 615 | 				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", | 
| 616 | 				return -EINVAL); | 
| 617 | else { | 
| 618 | smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl; | 
| 619 | smu_data->power_tune_table.LPMLTemperatureMin = | 
| 620 | (uint8_t)((temp >> 16) & 0xff); | 
| 621 | smu_data->power_tune_table.LPMLTemperatureMax = | 
| 622 | (uint8_t)((temp >> 8) & 0xff); | 
| 623 | smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff); | 
| 624 | } | 
| 625 | return 0; | 
| 626 | } | 
| 627 | |
| 628 | static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) | 
| 629 | { | 
| 630 | int i; | 
| 631 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 632 | |
| 633 | /* Currently not used. Set all to zero. */ | 
| 634 | for (i = 0; i < 16; i++) | 
| 635 | smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0; | 
| 636 | |
| 637 | return 0; | 
| 638 | } | 
| 639 | |
| 640 | static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) | 
| 641 | { | 
| 642 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 643 | |
| 644 | if ((hwmgr->thermal_controller.advanceFanControlParameters. | 
| 645 | usFanOutputSensitivity & (1 << 15)) || | 
| 646 | 0 == hwmgr->thermal_controller.advanceFanControlParameters. | 
| 647 | usFanOutputSensitivity) | 
| 648 | hwmgr->thermal_controller.advanceFanControlParameters. | 
| 649 | usFanOutputSensitivity = hwmgr->thermal_controller. | 
| 650 | advanceFanControlParameters.usDefaultFanOutputSensitivity; | 
| 651 | |
| 652 | smu_data->power_tune_table.FuzzyFan_PwmSetDelta = | 
| 653 | 			PP_HOST_TO_SMC_US(hwmgr->thermal_controller. | 
| 654 | 					advanceFanControlParameters.usFanOutputSensitivity); | 
| 655 | return 0; | 
| 656 | } | 
| 657 | |
| 658 | static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) | 
| 659 | { | 
| 660 | int i; | 
| 661 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 662 | |
| 663 | /* Currently not used. Set all to zero. */ | 
| 664 | for (i = 0; i < 16; i++) | 
| 665 | smu_data->power_tune_table.GnbLPML[i] = 0; | 
| 666 | |
| 667 | return 0; | 
| 668 | } | 
| 669 | |
| 670 | static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) | 
| 671 | { | 
| 672 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 673 | struct phm_ppt_v1_information *table_info = | 
| 674 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 675 | uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; | 
| 676 | uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; | 
| 677 | struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; | 
| 678 | |
| 679 | HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); | 
| 680 | LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); | 
| 681 | |
| 682 | smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = | 
| 683 | 			CONVERT_FROM_HOST_TO_SMC_US(HiSidd); | 
| 684 | smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = | 
| 685 | 			CONVERT_FROM_HOST_TO_SMC_US(LoSidd); | 
| 686 | |
| 687 | return 0; | 
| 688 | } | 
| 689 | |
| 690 | static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) | 
| 691 | { | 
| 692 | uint32_t pm_fuse_table_offset; | 
| 693 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 694 | |
| 695 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 696 | PHM_PlatformCaps_PowerContainment)) { | 
| 697 | if (smu7_read_smc_sram_dword(hwmgr, | 
| 698 | SMU7_FIRMWARE_HEADER_LOCATION + | 
| 699 | offsetof(SMU73_Firmware_Header, PmFuseTable), | 
| 700 | &pm_fuse_table_offset, SMC_RAM_END)) | 
| 701 | 			PP_ASSERT_WITH_CODE(false, | 
| 702 | 					"Attempt to get pm_fuse_table_offset Failed!", | 
| 703 | 					return -EINVAL); | 
| 704 | |
| 705 | /* DW6 */ | 
| 706 | if (fiji_populate_svi_load_line(hwmgr)) | 
| 707 | 			PP_ASSERT_WITH_CODE(false, | 
| 708 | 					"Attempt to populate SviLoadLine Failed!", | 
| 709 | 					return -EINVAL); | 
| 710 | /* DW7 */ | 
| 711 | if (fiji_populate_tdc_limit(hwmgr)) | 
| 712 | 			PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TDCLimit Failed!" ); return -22; } } while (0)  | 
| 713 | 					"Attempt to populate TDCLimit Failed!", return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TDCLimit Failed!" ); return -22; } } while (0);  | 
| 714 | /* DW8 */ | 
| 715 | if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset)) | 
| 716 | 			PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TdcWaterfallCtl, " "LPMLTemperature Min and Max Failed!"); return -22; } } while (0)  | 
| 717 | 					"Attempt to populate TdcWaterfallCtl, "do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TdcWaterfallCtl, " "LPMLTemperature Min and Max Failed!"); return -22; } } while (0)  | 
| 718 | 					"LPMLTemperature Min and Max Failed!",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TdcWaterfallCtl, " "LPMLTemperature Min and Max Failed!"); return -22; } } while (0)  | 
| 719 | 					return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate TdcWaterfallCtl, " "LPMLTemperature Min and Max Failed!"); return -22; } } while (0);  | 
| 720 | |
| 721 | /* DW9-DW12 */ | 
| 722 | if (0 != fiji_populate_temperature_scaler(hwmgr)) | 
| 723 | 			PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate LPMLTemperatureScaler Failed!" ); return -22; } } while (0)  | 
| 724 | 					"Attempt to populate LPMLTemperatureScaler Failed!",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate LPMLTemperatureScaler Failed!" ); return -22; } } while (0)  | 
| 725 | 					return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate LPMLTemperatureScaler Failed!" ); return -22; } } while (0);  | 
| 726 | |
| 727 | /* DW13-DW14 */ | 
| 728 | if (fiji_populate_fuzzy_fan(hwmgr)) | 
| 729 | 			PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate Fuzzy Fan Control parameters Failed!" ); return -22; } } while (0)  | 
| 730 | 					"Attempt to populate Fuzzy Fan Control parameters Failed!",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate Fuzzy Fan Control parameters Failed!" ); return -22; } } while (0)  | 
| 731 | 					return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate Fuzzy Fan Control parameters Failed!" ); return -22; } } while (0);  | 
| 732 | |
| 733 | /* DW15-DW18 */ | 
| 734 | if (fiji_populate_gnb_lpml(hwmgr)) | 
| 735 | 			PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate GnbLPML Failed!" ); return -22; } } while (0)  | 
| 736 | 					"Attempt to populate GnbLPML Failed!",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate GnbLPML Failed!" ); return -22; } } while (0)  | 
| 737 | 					return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate GnbLPML Failed!" ); return -22; } } while (0);  | 
| 738 | |
| 739 | /* DW20 */ | 
| 740 | if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr)) | 
| 741 | 			PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!"); return -22; } } while (0)  | 
| 742 | 					"Attempt to populate BapmVddCBaseLeakage Hi and Lo "do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!"); return -22; } } while (0)  | 
| 743 | 					"Sidd Failed!", return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!"); return -22; } } while (0);  | 
| 744 | |
| 745 | if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, | 
| 746 | (uint8_t *)&smu_data->power_tune_table, | 
| 747 | sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END0x40000)) | 
| 748 | 			PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to download PmFuseTable Failed!" ); return -22; } } while (0)  | 
| 749 | 					"Attempt to download PmFuseTable Failed!",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to download PmFuseTable Failed!" ); return -22; } } while (0)  | 
| 750 | 					return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Attempt to download PmFuseTable Failed!" ); return -22; } } while (0);  | 
| 751 | } | 
| 752 | return 0; | 
| 753 | } | 
| 754 | |
| 755 | static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, | 
| 756 | struct SMU73_Discrete_DpmTable *table) | 
| 757 | { | 
| 758 | uint32_t count; | 
| 759 | uint8_t index; | 
| 760 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 761 | struct phm_ppt_v1_information *table_info = | 
| 762 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 763 | struct phm_ppt_v1_voltage_lookup_table *lookup_table = | 
| 764 | table_info->vddc_lookup_table; | 
| 765 | /* The table is already byte-swapped, so in order to use a value from it | 
| 766 | * we need to swap it back. | 
| 767 | * We are populating vddc CAC data into the BapmVddc table | 
| 768 | * in split and merged mode. | 
| 769 | */ | 
| 770 | |
| 771 | for (count = 0; count < lookup_table->count; count++) { | 
| 772 | index = phm_get_voltage_index(lookup_table, | 
| 773 | data->vddc_voltage_table.entries[count].value); | 
| 774 | table->BapmVddcVidLoSidd[count] = | 
| 775 | convert_to_vid(lookup_table->entries[index].us_cac_low); | 
| 776 | table->BapmVddcVidHiSidd[count] = | 
| 777 | convert_to_vid(lookup_table->entries[index].us_cac_high); | 
| 778 | } | 
| 779 | |
| 780 | return 0; | 
| 781 | } | 
| 782 | |
| 783 | static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, | 
| 784 | struct SMU73_Discrete_DpmTable *table) | 
| 785 | { | 
| 786 | int result; | 
| 787 | |
| 788 | result = fiji_populate_cac_table(hwmgr, table); | 
| 789 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate CAC voltage tables to SMC" ); return -22; } } while (0)  | 
| 790 | 			"can not populate CAC voltage tables to SMC",do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate CAC voltage tables to SMC" ); return -22; } } while (0)  | 
| 791 | 			return -EINVAL)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "can not populate CAC voltage tables to SMC" ); return -22; } } while (0);  | 
| 792 | |
| 793 | return 0; | 
| 794 | } | 
| 795 | |
| 796 | static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr, | 
| 797 | struct SMU73_Discrete_Ulv *state) | 
| 798 | { | 
| 799 | int result = 0; | 
| 800 | |
| 801 | struct phm_ppt_v1_information *table_info = | 
| 802 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 803 | |
| 804 | state->CcPwrDynRm = 0; | 
| 805 | state->CcPwrDynRm1 = 0; | 
| 806 | |
| 807 | state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; | 
| 808 | state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * | 
| 809 | VOLTAGE_VID_OFFSET_SCALE2100 / VOLTAGE_VID_OFFSET_SCALE1625); | 
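|  | /* The ULV voltage offset is scaled by 100/625, i.e. divided by 6.25; | 
|  |  * assuming the offset is expressed in mV, this converts it into | 
|  |  * 6.25 mV VID steps. | 
|  |  */ | 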
| 810 | |
| 811 | state->VddcPhase = 1; | 
| 812 | |
| 813 | if (!result) { | 
| 814 | 		CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm)((state->CcPwrDynRm) = (__uint32_t)(__builtin_constant_p(state ->CcPwrDynRm) ? (__uint32_t)(((__uint32_t)(state->CcPwrDynRm ) & 0xff) << 24 | ((__uint32_t)(state->CcPwrDynRm ) & 0xff00) << 8 | ((__uint32_t)(state->CcPwrDynRm ) & 0xff0000) >> 8 | ((__uint32_t)(state->CcPwrDynRm ) & 0xff000000) >> 24) : __swap32md(state->CcPwrDynRm )));  | 
| 815 | 		CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1)((state->CcPwrDynRm1) = (__uint32_t)(__builtin_constant_p( state->CcPwrDynRm1) ? (__uint32_t)(((__uint32_t)(state-> CcPwrDynRm1) & 0xff) << 24 | ((__uint32_t)(state-> CcPwrDynRm1) & 0xff00) << 8 | ((__uint32_t)(state-> CcPwrDynRm1) & 0xff0000) >> 8 | ((__uint32_t)(state ->CcPwrDynRm1) & 0xff000000) >> 24) : __swap32md (state->CcPwrDynRm1)));  | 
| 816 | 		CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset)((state->VddcOffset) = (__uint16_t)(__builtin_constant_p(state ->VddcOffset) ? (__uint16_t)(((__uint16_t)(state->VddcOffset ) & 0xffU) << 8 | ((__uint16_t)(state->VddcOffset ) & 0xff00U) >> 8) : __swap16md(state->VddcOffset )));  | 
| 817 | } | 
| 818 | return result; | 
| 819 | } | 
| 820 | |
| 821 | static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr, | 
| 822 | struct SMU73_Discrete_DpmTable *table) | 
| 823 | { | 
| 824 | return fiji_populate_ulv_level(hwmgr, &table->Ulv); | 
| 825 | } | 
| 826 | |
| 827 | static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr, | 
| 828 | struct SMU73_Discrete_DpmTable *table) | 
| 829 | { | 
| 830 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 831 | struct smu7_dpm_table *dpm_table = &data->dpm_table; | 
| 832 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 833 | int i; | 
| 834 | |
| 835 | /* Index (dpm_table->pcie_speed_table.count) | 
| 836 | * is reserved for PCIE boot level. */ | 
| 837 | for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { | 
| 838 | table->LinkLevel[i].PcieGenSpeed = | 
| 839 | (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; | 
| 840 | table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( | 
| 841 | dpm_table->pcie_speed_table.dpm_levels[i].param1); | 
| 842 | table->LinkLevel[i].EnabledForActivity = 1; | 
| 843 | table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); | 
| 844 | 		table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5)(__uint32_t)(__builtin_constant_p(5) ? (__uint32_t)(((__uint32_t )(5) & 0xff) << 24 | ((__uint32_t)(5) & 0xff00) << 8 | ((__uint32_t)(5) & 0xff0000) >> 8 | ( (__uint32_t)(5) & 0xff000000) >> 24) : __swap32md(5 ));  | 
| 845 | 		table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30)(__uint32_t)(__builtin_constant_p(30) ? (__uint32_t)(((__uint32_t )(30) & 0xff) << 24 | ((__uint32_t)(30) & 0xff00 ) << 8 | ((__uint32_t)(30) & 0xff0000) >> 8 | ((__uint32_t)(30) & 0xff000000) >> 24) : __swap32md (30));  | 
| 846 | } | 
| 847 | |
| 848 | smu_data->smc_state_table.LinkLevelCount = | 
| 849 | (uint8_t)dpm_table->pcie_speed_table.count; | 
| 850 | data->dpm_level_enable_mask.pcie_dpm_enable_mask = | 
| 851 | phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); | 
| 852 | |
| 853 | return 0; | 
| 854 | } | 
| 855 | |
| 856 | static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr, | 
| 857 | uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk) | 
| 858 | { | 
| 859 | const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 860 | struct pp_atomctrl_clock_dividers_vi dividers; | 
| 861 | uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; | 
| 862 | uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; | 
| 863 | uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; | 
| 864 | uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; | 
| 865 | uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; | 
| 866 | uint32_t ref_clock; | 
| 867 | uint32_t ref_divider; | 
| 868 | uint32_t fbdiv; | 
| 869 | int result; | 
| 870 | |
| 871 | /* get the engine clock dividers for this clock value */ | 
| 872 | result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, ÷rs); | 
| 873 | |
| 874 | 	PP_ASSERT_WITH_CODE(result == 0,do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS." ); return result; } } while (0)  | 
| 875 | 			"Error retrieving Engine Clock dividers from VBIOS.",do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS." ); return result; } } while (0)  | 
| 876 | 			return result)do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS." ); return result; } } while (0);  | 
| 877 | |
| 878 | /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */ | 
| 879 | ref_clock = atomctrl_get_reference_clock(hwmgr); | 
| 880 | ref_divider = 1 + dividers.uc_pll_ref_div; | 
| 881 | |
| 882 | /* the low 14 bits are the fraction and the high 12 bits are the divider */ | 
| 883 | fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; | 
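|  | /* 0x3FFFFFF keeps the low 26 bits: the 14-bit fraction plus the | 
|  |  * 12-bit integer divider described above. | 
|  |  */ | 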
| 884 | |
| 885 | /* SPLL_FUNC_CNTL setup */ | 
| 886 | 	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,(((spll_func_cntl) & ~0x7e0) | (0x7e0 & ((dividers.uc_pll_ref_div ) << 0x5)))  | 
| 887 | 			SPLL_REF_DIV, dividers.uc_pll_ref_div)(((spll_func_cntl) & ~0x7e0) | (0x7e0 & ((dividers.uc_pll_ref_div ) << 0x5)));  | 
| 888 | 	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,(((spll_func_cntl) & ~0x7f00000) | (0x7f00000 & ((dividers .uc_pll_post_div) << 0x14)))  | 
| 889 | 			SPLL_PDIV_A,  dividers.uc_pll_post_div)(((spll_func_cntl) & ~0x7f00000) | (0x7f00000 & ((dividers .uc_pll_post_div) << 0x14)));  | 
| 890 | |
| 891 | /* SPLL_FUNC_CNTL_3 setup*/ | 
| 892 | 	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,(((spll_func_cntl_3) & ~0x3ffffff) | (0x3ffffff & ((fbdiv ) << 0x0)))  | 
| 893 | 			SPLL_FB_DIV, fbdiv)(((spll_func_cntl_3) & ~0x3ffffff) | (0x3ffffff & ((fbdiv ) << 0x0)));  | 
| 894 | |
| 895 | /* set to use fractional accumulation*/ | 
| 896 | 	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,(((spll_func_cntl_3) & ~0x10000000) | (0x10000000 & ( (1) << 0x1c)))  | 
| 897 | 			SPLL_DITHEN, 1)(((spll_func_cntl_3) & ~0x10000000) | (0x10000000 & ( (1) << 0x1c)));  | 
| 898 | |
| 899 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 900 | PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { | 
| 901 | struct pp_atomctrl_internal_ss_info ssInfo; | 
| 902 | |
| 903 | uint32_t vco_freq = clock * dividers.uc_pll_post_div; | 
| 904 | if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr, | 
| 905 | vco_freq, &ssInfo)) { | 
| 906 | /* | 
| 907 | * ss_info.speed_spectrum_percentage -- in unit of 0.01% | 
| 908 | * ss_info.speed_spectrum_rate -- in unit of khz | 
| 909 | * | 
| 910 | * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 | 
| 911 | */ | 
| 912 | uint32_t clk_s = ref_clock * 5 / | 
| 913 | (ref_divider * ssInfo.speed_spectrum_rate); | 
| 914 | /* clkv = 2 * D * fbdiv / NS */ | 
| 915 | uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage * | 
| 916 | fbdiv / (clk_s * 10000); | 
| 917 | |
| 918 | 			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,(((cg_spll_spread_spectrum) & ~0xfff0) | (0xfff0 & (( clk_s) << 0x4)))  | 
| 919 | 					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s)(((cg_spll_spread_spectrum) & ~0xfff0) | (0xfff0 & (( clk_s) << 0x4)));  | 
| 920 | 			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,(((cg_spll_spread_spectrum) & ~0x1) | (0x1 & ((1) << 0x0)))  | 
| 921 | 					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1)(((cg_spll_spread_spectrum) & ~0x1) | (0x1 & ((1) << 0x0)));  | 
| 922 | 			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,(((cg_spll_spread_spectrum_2) & ~0x3ffffff) | (0x3ffffff & ((clk_v) << 0x0)))  | 
| 923 | 					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v)(((cg_spll_spread_spectrum_2) & ~0x3ffffff) | (0x3ffffff & ((clk_v) << 0x0)));  | 
| 924 | } | 
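|  | /* Note: the clk_s expression above matches the commented formula, | 
|  |  * since ref_divider == (REFDIV + 1) and the factor 10 / 2 reduces to 5. | 
|  |  */ | 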
| 925 | } | 
| 926 | |
| 927 | sclk->SclkFrequency = clock; | 
| 928 | sclk->CgSpllFuncCntl3 = spll_func_cntl_3; | 
| 929 | sclk->CgSpllFuncCntl4 = spll_func_cntl_4; | 
| 930 | sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; | 
| 931 | sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; | 
| 932 | sclk->SclkDid = (uint8_t)dividers.pll_post_divider; | 
| 933 | |
| 934 | return 0; | 
| 935 | } | 
| 936 | |
| 937 | static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, | 
| 938 | uint32_t clock, struct SMU73_Discrete_GraphicsLevel *level) | 
| 939 | { | 
| 940 | int result; | 
| 941 | /* PP_Clocks minClocks; */ | 
| 942 | uint32_t mvdd; | 
| 943 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 944 | struct phm_ppt_v1_information *table_info = | 
| 945 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 946 | phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL((void *)0); | 
| 947 | |
| 948 | result = fiji_calculate_sclk_params(hwmgr, clock, level); | 
| 949 | |
| 950 | if (hwmgr->od_enabled) | 
| 951 | vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_sclk; | 
| 952 | else | 
| 953 | vdd_dep_table = table_info->vdd_dep_on_sclk; | 
| 954 | |
| 955 | /* populate graphics levels */ | 
| 956 | result = fiji_get_dependency_volt_by_clk(hwmgr, | 
| 957 | vdd_dep_table, clock, | 
| 958 | (uint32_t *)(&level->MinVoltage), &mvdd); | 
| 959 | 	PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find VDDC voltage value for " "VDDC engine clock dependency table" ); return result; } } while (0)  | 
| 960 | 			"can not find VDDC voltage value for "do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find VDDC voltage value for " "VDDC engine clock dependency table" ); return result; } } while (0)  | 
| 961 | 			"VDDC engine clock dependency table",do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find VDDC voltage value for " "VDDC engine clock dependency table" ); return result; } } while (0)  | 
| 962 | 			return result)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find VDDC voltage value for " "VDDC engine clock dependency table" ); return result; } } while (0);  | 
| 963 | |
| 964 | level->SclkFrequency = clock; | 
| 965 | level->ActivityLevel = data->current_profile_setting.sclk_activity; | 
| 966 | level->CcPwrDynRm = 0; | 
| 967 | level->CcPwrDynRm1 = 0; | 
| 968 | level->EnabledForActivity = 0; | 
| 969 | level->EnabledForThrottle = 1; | 
| 970 | level->UpHyst = data->current_profile_setting.sclk_up_hyst; | 
| 971 | level->DownHyst = data->current_profile_setting.sclk_down_hyst; | 
| 972 | level->VoltageDownHyst = 0; | 
| 973 | level->PowerThrottle = 0; | 
| 974 | |
| 975 | data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr; | 
| 976 | |
| 977 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) | 
| 978 | level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock, | 
| 979 | hwmgr->display_config->min_core_set_clock_in_sr); | 
| 980 | |
| 981 | |
| 982 | /* Default to slow (low watermark); the highest DPM level will be | 
| 983 | * set to PPSMC_DISPLAY_WATERMARK_HIGH later. | 
| 984 | */ | 
| 985 | level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW0; | 
| 986 | |
| 987 | 	CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage)((level->MinVoltage) = (__uint32_t)(__builtin_constant_p(level ->MinVoltage) ? (__uint32_t)(((__uint32_t)(level->MinVoltage ) & 0xff) << 24 | ((__uint32_t)(level->MinVoltage ) & 0xff00) << 8 | ((__uint32_t)(level->MinVoltage ) & 0xff0000) >> 8 | ((__uint32_t)(level->MinVoltage ) & 0xff000000) >> 24) : __swap32md(level->MinVoltage )));  | 
| 988 | 	CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency)((level->SclkFrequency) = (__uint32_t)(__builtin_constant_p (level->SclkFrequency) ? (__uint32_t)(((__uint32_t)(level-> SclkFrequency) & 0xff) << 24 | ((__uint32_t)(level-> SclkFrequency) & 0xff00) << 8 | ((__uint32_t)(level ->SclkFrequency) & 0xff0000) >> 8 | ((__uint32_t )(level->SclkFrequency) & 0xff000000) >> 24) : __swap32md (level->SclkFrequency)));  | 
| 989 | 	CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel)((level->ActivityLevel) = (__uint16_t)(__builtin_constant_p (level->ActivityLevel) ? (__uint16_t)(((__uint16_t)(level-> ActivityLevel) & 0xffU) << 8 | ((__uint16_t)(level-> ActivityLevel) & 0xff00U) >> 8) : __swap16md(level-> ActivityLevel)));  | 
| 990 | 	CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3)((level->CgSpllFuncCntl3) = (__uint32_t)(__builtin_constant_p (level->CgSpllFuncCntl3) ? (__uint32_t)(((__uint32_t)(level ->CgSpllFuncCntl3) & 0xff) << 24 | ((__uint32_t) (level->CgSpllFuncCntl3) & 0xff00) << 8 | ((__uint32_t )(level->CgSpllFuncCntl3) & 0xff0000) >> 8 | ((__uint32_t )(level->CgSpllFuncCntl3) & 0xff000000) >> 24) : __swap32md(level->CgSpllFuncCntl3)));  | 
| 991 | 	CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4)((level->CgSpllFuncCntl4) = (__uint32_t)(__builtin_constant_p (level->CgSpllFuncCntl4) ? (__uint32_t)(((__uint32_t)(level ->CgSpllFuncCntl4) & 0xff) << 24 | ((__uint32_t) (level->CgSpllFuncCntl4) & 0xff00) << 8 | ((__uint32_t )(level->CgSpllFuncCntl4) & 0xff0000) >> 8 | ((__uint32_t )(level->CgSpllFuncCntl4) & 0xff000000) >> 24) : __swap32md(level->CgSpllFuncCntl4)));  | 
| 992 | 	CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum)((level->SpllSpreadSpectrum) = (__uint32_t)(__builtin_constant_p (level->SpllSpreadSpectrum) ? (__uint32_t)(((__uint32_t)(level ->SpllSpreadSpectrum) & 0xff) << 24 | ((__uint32_t )(level->SpllSpreadSpectrum) & 0xff00) << 8 | (( __uint32_t)(level->SpllSpreadSpectrum) & 0xff0000) >> 8 | ((__uint32_t)(level->SpllSpreadSpectrum) & 0xff000000 ) >> 24) : __swap32md(level->SpllSpreadSpectrum)));  | 
| 993 | 	CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2)((level->SpllSpreadSpectrum2) = (__uint32_t)(__builtin_constant_p (level->SpllSpreadSpectrum2) ? (__uint32_t)(((__uint32_t)( level->SpllSpreadSpectrum2) & 0xff) << 24 | ((__uint32_t )(level->SpllSpreadSpectrum2) & 0xff00) << 8 | ( (__uint32_t)(level->SpllSpreadSpectrum2) & 0xff0000) >> 8 | ((__uint32_t)(level->SpllSpreadSpectrum2) & 0xff000000 ) >> 24) : __swap32md(level->SpllSpreadSpectrum2)));  | 
| 994 | 	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm)((level->CcPwrDynRm) = (__uint32_t)(__builtin_constant_p(level ->CcPwrDynRm) ? (__uint32_t)(((__uint32_t)(level->CcPwrDynRm ) & 0xff) << 24 | ((__uint32_t)(level->CcPwrDynRm ) & 0xff00) << 8 | ((__uint32_t)(level->CcPwrDynRm ) & 0xff0000) >> 8 | ((__uint32_t)(level->CcPwrDynRm ) & 0xff000000) >> 24) : __swap32md(level->CcPwrDynRm )));  | 
| 995 | 	CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1)((level->CcPwrDynRm1) = (__uint32_t)(__builtin_constant_p( level->CcPwrDynRm1) ? (__uint32_t)(((__uint32_t)(level-> CcPwrDynRm1) & 0xff) << 24 | ((__uint32_t)(level-> CcPwrDynRm1) & 0xff00) << 8 | ((__uint32_t)(level-> CcPwrDynRm1) & 0xff0000) >> 8 | ((__uint32_t)(level ->CcPwrDynRm1) & 0xff000000) >> 24) : __swap32md (level->CcPwrDynRm1)));  | 
| 996 | |
| 997 | return 0; | 
| 998 | } | 
| 999 | |
| 1000 | static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) | 
| 1001 | { | 
| 1002 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 1003 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 1004 | |
| 1005 | struct smu7_dpm_table *dpm_table = &data->dpm_table; | 
| 1006 | struct phm_ppt_v1_information *table_info = | 
| 1007 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 1008 | struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; | 
| 1009 | uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count; | 
| 1010 | int result = 0; | 
| 1011 | uint32_t array = smu_data->smu7_data.dpm_table_start + | 
| 1012 | offsetof(SMU73_Discrete_DpmTable, GraphicsLevel)__builtin_offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); | 
| 1013 | uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) * | 
| 1014 | SMU73_MAX_LEVELS_GRAPHICS8; | 
| 1015 | struct SMU73_Discrete_GraphicsLevel *levels = | 
| 1016 | smu_data->smc_state_table.GraphicsLevel; | 
| 1017 | uint32_t i, max_entry; | 
| 1018 | uint8_t hightest_pcie_level_enabled = 0, | 
| 1019 | lowest_pcie_level_enabled = 0, | 
| 1020 | mid_pcie_level_enabled = 0, | 
| 1021 | count = 0; | 
| 1022 | |
| 1023 | for (i = 0; i < dpm_table->sclk_table.count; i++) { | 
| 1024 | result = fiji_populate_single_graphic_level(hwmgr, | 
| 1025 | dpm_table->sclk_table.dpm_levels[i].value, | 
| 1026 | &levels[i]); | 
| 1027 | if (result) | 
| 1028 | return result; | 
| 1029 | |
| 1030 | /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ | 
| 1031 | if (i > 1) | 
| 1032 | levels[i].DeepSleepDivId = 0; | 
| 1033 | } | 
| 1034 | |
| 1035 | /* Only enable level 0 for now.*/ | 
| 1036 | levels[0].EnabledForActivity = 1; | 
| 1037 | |
| 1038 | /* set highest level watermark to high */ | 
| 1039 | levels[dpm_table->sclk_table.count - 1].DisplayWatermark = | 
| 1040 | PPSMC_DISPLAY_WATERMARK_HIGH1; | 
| 1041 | |
| 1042 | smu_data->smc_state_table.GraphicsDpmLevelCount = | 
| 1043 | (uint8_t)dpm_table->sclk_table.count; | 
| 1044 | data->dpm_level_enable_mask.sclk_dpm_enable_mask = | 
| 1045 | phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); | 
| 1046 | |
| 1047 | if (pcie_table != NULL((void *)0)) { | 
| 1048 | 		PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),do { if (!((1 <= pcie_entry_cnt))) { printk("\0014" "amdgpu: " "%s\n", "There must be 1 or more PCIE levels defined in PPTable." ); return -22; } } while (0)  | 
| 1049 | 				"There must be 1 or more PCIE levels defined in PPTable.",do { if (!((1 <= pcie_entry_cnt))) { printk("\0014" "amdgpu: " "%s\n", "There must be 1 or more PCIE levels defined in PPTable." ); return -22; } } while (0)  | 
| 1050 | 				return -EINVAL)do { if (!((1 <= pcie_entry_cnt))) { printk("\0014" "amdgpu: " "%s\n", "There must be 1 or more PCIE levels defined in PPTable." ); return -22; } } while (0);  | 
| 1051 | max_entry = pcie_entry_cnt - 1; | 
| 1052 | for (i = 0; i < dpm_table->sclk_table.count; i++) | 
| 1053 | levels[i].pcieDpmLevel = | 
| 1054 | (uint8_t) ((i < max_entry) ? i : max_entry); | 
| 1055 | } else { | 
| 1056 | while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && | 
| 1057 | ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & | 
| 1058 | (1 << (hightest_pcie_level_enabled + 1))) != 0)) | 
| 1059 | hightest_pcie_level_enabled++; | 
| 1060 | |
| 1061 | while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && | 
| 1062 | ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & | 
| 1063 | (1 << lowest_pcie_level_enabled)) == 0)) | 
| 1064 | lowest_pcie_level_enabled++; | 
| 1065 | |
| 1066 | while ((count < hightest_pcie_level_enabled) && | 
| 1067 | ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & | 
| 1068 | (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) | 
| 1069 | count++; | 
| 1070 | |
| 1071 | mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < | 
| 1072 | hightest_pcie_level_enabled ? | 
| 1073 | (lowest_pcie_level_enabled + 1 + count) : | 
| 1074 | hightest_pcie_level_enabled; | 
| 1075 | |
| 1076 | /* set pcieDpmLevel to hightest_pcie_level_enabled */ | 
| 1077 | for (i = 2; i < dpm_table->sclk_table.count; i++) | 
| 1078 | levels[i].pcieDpmLevel = hightest_pcie_level_enabled; | 
| 1079 | |
| 1080 | /* set pcieDpmLevel to lowest_pcie_level_enabled */ | 
| 1081 | levels[0].pcieDpmLevel = lowest_pcie_level_enabled; | 
| 1082 | |
| 1083 | /* set pcieDpmLevel to mid_pcie_level_enabled */ | 
| 1084 | levels[1].pcieDpmLevel = mid_pcie_level_enabled; | 
| 1085 | } | 
| 1086 | /* the level count is sent to the SMC once, at SMC table init, and never changes */ | 
| 1087 | result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, | 
| 1088 | (uint32_t)array_size, SMC_RAM_END0x40000); | 
| 1089 | |
| 1090 | return result; | 
| 1091 | } | 
| 1092 | |
| 1093 | |
| 1094 | /** | 
| 1095 | * MCLK Frequency Ratio | 
| 1096 | * SEQ_CG_RESP Bit[31:24] - 0x0 | 
| 1097 | * Bit[27:24] - DDR3 Frequency ratio | 
| 1098 | * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz | 
| 1099 | * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz | 
| 1100 | * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz | 
| 1101 | * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz | 
| 1102 | * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz | 
| 1103 | * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz | 
| 1104 | * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz | 
| 1105 | * 400 < 0x7 <= 450MHz, 800 < 0xF | 
| 1106 | */ | 
| 1107 | static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock) | 
| 1108 | { | 
| 1109 | if (mem_clock <= 10000) | 
| 1110 | return 0x0; | 
| 1111 | if (mem_clock <= 15000) | 
| 1112 | return 0x1; | 
| 1113 | if (mem_clock <= 20000) | 
| 1114 | return 0x2; | 
| 1115 | if (mem_clock <= 25000) | 
| 1116 | return 0x3; | 
| 1117 | if (mem_clock <= 30000) | 
| 1118 | return 0x4; | 
| 1119 | if (mem_clock <= 35000) | 
| 1120 | return 0x5; | 
| 1121 | if (mem_clock <= 40000) | 
| 1122 | return 0x6; | 
| 1123 | if (mem_clock <= 45000) | 
| 1124 | return 0x7; | 
| 1125 | if (mem_clock <= 50000) | 
| 1126 | return 0x8; | 
| 1127 | if (mem_clock <= 55000) | 
| 1128 | return 0x9; | 
| 1129 | if (mem_clock <= 60000) | 
| 1130 | return 0xa; | 
| 1131 | if (mem_clock <= 65000) | 
| 1132 | return 0xb; | 
| 1133 | if (mem_clock <= 70000) | 
| 1134 | return 0xc; | 
| 1135 | if (mem_clock <= 75000) | 
| 1136 | return 0xd; | 
| 1137 | if (mem_clock <= 80000) | 
| 1138 | return 0xe; | 
| 1139 | /* mem_clock > 800MHz */ | 
| 1140 | return 0xf; | 
| 1141 | } | 
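|  | /* Per the table above, mem_clock is expressed in 10 kHz units | 
|  |  * (10000 == 100 MHz), so e.g. a 450 MHz memory clock is passed | 
|  |  * as 45000 and maps to frequency ratio 0x7. | 
|  |  */ | 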
| 1142 | |
| 1143 | static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr, | 
| 1144 | uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk) | 
| 1145 | { | 
| 1146 | struct pp_atomctrl_memory_clock_param mem_param; | 
| 1147 | int result; | 
| 1148 | |
| 1149 | result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param); | 
| 1150 | 	PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "Failed to get Memory PLL Dividers."); ; } } while (0)  | 
| 1151 | 			"Failed to get Memory PLL Dividers.",do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "Failed to get Memory PLL Dividers."); ; } } while (0)  | 
| 1152 | 			)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "Failed to get Memory PLL Dividers."); ; } } while (0);  | 
| 1153 | |
| 1154 | /* Save the result data to the output memory level structure */ | 
| 1155 | mclk->MclkFrequency = clock; | 
| 1156 | mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider; | 
| 1157 | mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock); | 
| 1158 | |
| 1159 | return result; | 
| 1160 | } | 
| 1161 | |
| 1162 | static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, | 
| 1163 | uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level) | 
| 1164 | { | 
| 1165 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 1166 | struct phm_ppt_v1_information *table_info = | 
| 1167 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 1168 | int result = 0; | 
| 1169 | uint32_t mclk_stutter_mode_threshold = 60000; | 
| 1170 | phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL((void *)0); | 
| 1171 | |
| 1172 | if (hwmgr->od_enabled) | 
| 1173 | vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk; | 
| 1174 | else | 
| 1175 | vdd_dep_table = table_info->vdd_dep_on_mclk; | 
| 1176 | |
| 1177 | if (vdd_dep_table) { | 
| 1178 | result = fiji_get_dependency_volt_by_clk(hwmgr, | 
| 1179 | vdd_dep_table, clock, | 
| 1180 | (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd); | 
| 1181 | 		PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find MinVddc voltage value from memory " "VDDC voltage dependency table" ); return result; } } while (0)  | 
| 1182 | 				"can not find MinVddc voltage value from memory "do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find MinVddc voltage value from memory " "VDDC voltage dependency table" ); return result; } } while (0)  | 
| 1183 | 				"VDDC voltage dependency table", return result)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find MinVddc voltage value from memory " "VDDC voltage dependency table" ); return result; } } while (0);  | 
| 1184 | } | 
| 1185 | |
| 1186 | mem_level->EnabledForThrottle = 1; | 
| 1187 | mem_level->EnabledForActivity = 0; | 
| 1188 | mem_level->UpHyst = data->current_profile_setting.mclk_up_hyst; | 
| 1189 | mem_level->DownHyst = data->current_profile_setting.mclk_down_hyst; | 
| 1190 | mem_level->VoltageDownHyst = 0; | 
| 1191 | mem_level->ActivityLevel = data->current_profile_setting.mclk_activity; | 
| 1192 | mem_level->StutterEnable = false0; | 
| 1193 | |
| 1194 | mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW0; | 
| 1195 | |
| 1196 | /* enable stutter mode if all of the following conditions apply | 
| 1197 | * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI, | 
| 1198 | * &(data->DisplayTiming.numExistingDisplays)); | 
| 1199 | */ | 
| 1200 | data->display_timing.num_existing_displays = hwmgr->display_config->num_display; | 
| 1201 | data->display_timing.vrefresh = hwmgr->display_config->vrefresh; | 
| 1202 | |
| 1203 | if (mclk_stutter_mode_threshold && | 
| 1204 | (clock <= mclk_stutter_mode_threshold) && | 
| 1205 | (!data->is_uvd_enabled) && | 
| 1206 | 		(PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,((((((struct cgs_device *)hwmgr->device)->ops->read_register (hwmgr->device,0x1b35))) & 0x1) >> 0x0)  | 
| 1207 | 				STUTTER_ENABLE)((((((struct cgs_device *)hwmgr->device)->ops->read_register (hwmgr->device,0x1b35))) & 0x1) >> 0x0) & 0x1))  | 
| 1208 | mem_level->StutterEnable = true1; | 
| 1209 | |
| 1210 | result = fiji_calculate_mclk_params(hwmgr, clock, mem_level); | 
| 1211 | if (!result) { | 
| 1212 | 		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd)((mem_level->MinMvdd) = (__uint32_t)(__builtin_constant_p( mem_level->MinMvdd) ? (__uint32_t)(((__uint32_t)(mem_level ->MinMvdd) & 0xff) << 24 | ((__uint32_t)(mem_level ->MinMvdd) & 0xff00) << 8 | ((__uint32_t)(mem_level ->MinMvdd) & 0xff0000) >> 8 | ((__uint32_t)(mem_level ->MinMvdd) & 0xff000000) >> 24) : __swap32md(mem_level ->MinMvdd)));  | 
| 1213 | 		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency)((mem_level->MclkFrequency) = (__uint32_t)(__builtin_constant_p (mem_level->MclkFrequency) ? (__uint32_t)(((__uint32_t)(mem_level ->MclkFrequency) & 0xff) << 24 | ((__uint32_t)(mem_level ->MclkFrequency) & 0xff00) << 8 | ((__uint32_t)( mem_level->MclkFrequency) & 0xff0000) >> 8 | ((__uint32_t )(mem_level->MclkFrequency) & 0xff000000) >> 24) : __swap32md(mem_level->MclkFrequency)));  | 
| 1214 | 		CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel)((mem_level->ActivityLevel) = (__uint16_t)(__builtin_constant_p (mem_level->ActivityLevel) ? (__uint16_t)(((__uint16_t)(mem_level ->ActivityLevel) & 0xffU) << 8 | ((__uint16_t)(mem_level ->ActivityLevel) & 0xff00U) >> 8) : __swap16md(mem_level ->ActivityLevel)));  | 
| 1215 | 		CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage)((mem_level->MinVoltage) = (__uint32_t)(__builtin_constant_p (mem_level->MinVoltage) ? (__uint32_t)(((__uint32_t)(mem_level ->MinVoltage) & 0xff) << 24 | ((__uint32_t)(mem_level ->MinVoltage) & 0xff00) << 8 | ((__uint32_t)(mem_level ->MinVoltage) & 0xff0000) >> 8 | ((__uint32_t)(mem_level ->MinVoltage) & 0xff000000) >> 24) : __swap32md( mem_level->MinVoltage)));  | 
| 1216 | } | 
| 1217 | return result; | 
| 1218 | } | 
| 1219 | |
| 1220 | static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) | 
| 1221 | { | 
| 1222 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 1223 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 1224 | struct smu7_dpm_table *dpm_table = &data->dpm_table; | 
| 1225 | int result; | 
| 1226 | /* populate MCLK dpm table to SMU7 */ | 
| 1227 | uint32_t array = smu_data->smu7_data.dpm_table_start + | 
| 1228 | offsetof(SMU73_Discrete_DpmTable, MemoryLevel)__builtin_offsetof(SMU73_Discrete_DpmTable, MemoryLevel); | 
| 1229 | uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) * | 
| 1230 | SMU73_MAX_LEVELS_MEMORY4; | 
| 1231 | struct SMU73_Discrete_MemoryLevel *levels = | 
| 1232 | smu_data->smc_state_table.MemoryLevel; | 
| 1233 | uint32_t i; | 
| 1234 | |
| 1235 | for (i = 0; i < dpm_table->mclk_table.count; i++) { | 
| 1236 | 		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),do { if (!((0 != dpm_table->mclk_table.dpm_levels[i].value ))) { printk("\0014" "amdgpu: " "%s\n", "can not populate memory level as memory clock is zero" ); return -22; } } while (0)  | 
| 1237 | 				"can not populate memory level as memory clock is zero",do { if (!((0 != dpm_table->mclk_table.dpm_levels[i].value ))) { printk("\0014" "amdgpu: " "%s\n", "can not populate memory level as memory clock is zero" ); return -22; } } while (0)  | 
| 1238 | 				return -EINVAL)do { if (!((0 != dpm_table->mclk_table.dpm_levels[i].value ))) { printk("\0014" "amdgpu: " "%s\n", "can not populate memory level as memory clock is zero" ); return -22; } } while (0);  | 
| 1239 | result = fiji_populate_single_memory_level(hwmgr, | 
| 1240 | dpm_table->mclk_table.dpm_levels[i].value, | 
| 1241 | &levels[i]); | 
| 1242 | if (result) | 
| 1243 | return result; | 
| 1244 | } | 
| 1245 | |
| 1246 | /* Only enable level 0 for now. */ | 
| 1247 | levels[0].EnabledForActivity = 1; | 
| 1248 | |
| 1249 | /* In order to prevent MC activity in stutter mode from pushing DPM up, | 
| 1250 | * the UVD change complements this by putting the MCLK in | 
| 1251 | * a higher state by default, such that we are not affected by | 
| 1252 | * the up threshold or MCLK DPM latency. | 
| 1253 | */ | 
| 1254 | levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; | 
| 1255 | 	CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel)((levels[0].ActivityLevel) = (__uint16_t)(__builtin_constant_p (levels[0].ActivityLevel) ? (__uint16_t)(((__uint16_t)(levels [0].ActivityLevel) & 0xffU) << 8 | ((__uint16_t)(levels [0].ActivityLevel) & 0xff00U) >> 8) : __swap16md(levels [0].ActivityLevel)));  | 
| 1256 | |
| 1257 | smu_data->smc_state_table.MemoryDpmLevelCount = | 
| 1258 | (uint8_t)dpm_table->mclk_table.count; | 
| 1259 | data->dpm_level_enable_mask.mclk_dpm_enable_mask = | 
| 1260 | phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); | 
| 1261 | /* set highest level watermark to high */ | 
| 1262 | levels[dpm_table->mclk_table.count - 1].DisplayWatermark = | 
| 1263 | PPSMC_DISPLAY_WATERMARK_HIGH1; | 
| 1264 | |
| 1265 | /* the level count is sent to the SMC once, at SMC table init, and never changes */ | 
| 1266 | result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, | 
| 1267 | (uint32_t)array_size, SMC_RAM_END0x40000); | 
| 1268 | |
| 1269 | return result; | 
| 1270 | } | 
| 1271 | |
| 1272 | static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr, | 
| 1273 | uint32_t mclk, SMIO_Pattern *smio_pat) | 
| 1274 | { | 
| 1275 | const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 1276 | struct phm_ppt_v1_information *table_info = | 
| 1277 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 1278 | uint32_t i = 0; | 
| 1279 | |
| 1280 | if (SMU7_VOLTAGE_CONTROL_NONE0x0 != data->mvdd_control) { | 
| 1281 | /* find the mvdd entry whose clock is at least the requested mclk */ | 
| 1282 | for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { | 
| 1283 | if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { | 
| 1284 | smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; | 
| 1285 | break; | 
| 1286 | } | 
| 1287 | } | 
| 1288 | 		PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,do { if (!(i < table_info->vdd_dep_on_mclk->count)) { printk("\0014" "amdgpu: " "%s\n", "MVDD Voltage is outside the supported range." ); return -22; } } while (0)  | 
| 1289 | 				"MVDD Voltage is outside the supported range.",do { if (!(i < table_info->vdd_dep_on_mclk->count)) { printk("\0014" "amdgpu: " "%s\n", "MVDD Voltage is outside the supported range." ); return -22; } } while (0)  | 
| 1290 | 				return -EINVAL)do { if (!(i < table_info->vdd_dep_on_mclk->count)) { printk("\0014" "amdgpu: " "%s\n", "MVDD Voltage is outside the supported range." ); return -22; } } while (0);  | 
| 1291 | } else | 
| 1292 | return -EINVAL22; | 
| 1293 | |
| 1294 | return 0; | 
| 1295 | } | 
| 1296 | |
| 1297 | static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, | 
| 1298 | SMU73_Discrete_DpmTable *table) | 
| 1299 | { | 
| 1300 | int result = 0; | 
| 1301 | const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 1302 | struct phm_ppt_v1_information *table_info = | 
| 1303 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 1304 | struct pp_atomctrl_clock_dividers_vi dividers; | 
| 1305 | SMIO_Pattern vol_level; | 
| 1306 | uint32_t mvdd; | 
| 1307 | uint16_t us_mvdd; | 
| 1308 | uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; | 
| 1309 | uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; | 
| 1310 | |
| 1311 | table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC0x01; | 
| 1312 | |
| 1313 | if (!data->sclk_dpm_key_disabled) { | 
| 1314 | /* Get MinVoltage and Frequency from DPM0, | 
| 1315 | * already converted to SMC_UL */ | 
| 1316 | table->ACPILevel.SclkFrequency = | 
| 1317 | data->dpm_table.sclk_table.dpm_levels[0].value; | 
| 1318 | result = fiji_get_dependency_volt_by_clk(hwmgr, | 
| 1319 | table_info->vdd_dep_on_sclk, | 
| 1320 | table->ACPILevel.SclkFrequency, | 
| 1321 | (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd); | 
| 1322 | 		PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "Cannot find ACPI VDDC voltage value " "in Clock Dependency Table" ); ; } } while (0)  | 
| 1323 | 				"Cannot find ACPI VDDC voltage value " \do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "Cannot find ACPI VDDC voltage value " "in Clock Dependency Table" ); ; } } while (0)  | 
| 1324 | 				"in Clock Dependency Table",do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "Cannot find ACPI VDDC voltage value " "in Clock Dependency Table" ); ; } } while (0)  | 
| 1325 | 				)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "Cannot find ACPI VDDC voltage value " "in Clock Dependency Table" ); ; } } while (0);  | 
| 1326 | } else { | 
| 1327 | table->ACPILevel.SclkFrequency = | 
| 1328 | data->vbios_boot_state.sclk_bootup_value; | 
| 1329 | table->ACPILevel.MinVoltage = | 
| 1330 | data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE4; | 
| 1331 | } | 
| 1332 | |
| 1333 | /* get the engine clock dividers for this clock value */ | 
| 1334 | result = atomctrl_get_engine_pll_dividers_vi(hwmgr, | 
| 1335 | table->ACPILevel.SclkFrequency, ÷rs); | 
| 1336 | 	PP_ASSERT_WITH_CODE(result == 0,do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS." ); return result; } } while (0)  | 
| 1337 | 			"Error retrieving Engine Clock dividers from VBIOS.",do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS." ); return result; } } while (0)  | 
| 1338 | 			return result)do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error retrieving Engine Clock dividers from VBIOS." ); return result; } } while (0);  | 
| 1339 | |
| 1340 | table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; | 
| 1341 | table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW0; | 
| 1342 | table->ACPILevel.DeepSleepDivId = 0; | 
| 1343 | |
| 1344 | 	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,(((spll_func_cntl) & ~0x2) | (0x2 & ((0) << 0x1 )))  | 
| 1345 | 			SPLL_PWRON, 0)(((spll_func_cntl) & ~0x2) | (0x2 & ((0) << 0x1 )));  | 
| 1346 | 	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,(((spll_func_cntl) & ~0x1) | (0x1 & ((1) << 0x0 )))  | 
| 1347 | 			SPLL_RESET, 1)(((spll_func_cntl) & ~0x1) | (0x1 & ((1) << 0x0 )));  | 
| 1348 | 	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,(((spll_func_cntl_2) & ~0x1ff) | (0x1ff & ((4) << 0x0)))  | 
| 1349 | 			SCLK_MUX_SEL, 4)(((spll_func_cntl_2) & ~0x1ff) | (0x1ff & ((4) << 0x0)));  | 
| 1350 | |
| 1351 | table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; | 
| 1352 | table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; | 
| 1353 | table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; | 
| 1354 | table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; | 
| 1355 | table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; | 
| 1356 | table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; | 
| 1357 | table->ACPILevel.CcPwrDynRm = 0; | 
| 1358 | table->ACPILevel.CcPwrDynRm1 = 0; | 
| 1359 | |
| 1360 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags)((table->ACPILevel.Flags) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.Flags) ? (__uint32_t)(((__uint32_t)(table ->ACPILevel.Flags) & 0xff) << 24 | ((__uint32_t) (table->ACPILevel.Flags) & 0xff00) << 8 | ((__uint32_t )(table->ACPILevel.Flags) & 0xff0000) >> 8 | ((__uint32_t )(table->ACPILevel.Flags) & 0xff000000) >> 24) : __swap32md(table->ACPILevel.Flags)));  | 
| 1361 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency)((table->ACPILevel.SclkFrequency) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.SclkFrequency) ? (__uint32_t)(((__uint32_t )(table->ACPILevel.SclkFrequency) & 0xff) << 24 | ((__uint32_t)(table->ACPILevel.SclkFrequency) & 0xff00 ) << 8 | ((__uint32_t)(table->ACPILevel.SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel .SclkFrequency) & 0xff000000) >> 24) : __swap32md(table ->ACPILevel.SclkFrequency)));  | 
| 1362 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage)((table->ACPILevel.MinVoltage) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.MinVoltage) ? (__uint32_t)(((__uint32_t) (table->ACPILevel.MinVoltage) & 0xff) << 24 | (( __uint32_t)(table->ACPILevel.MinVoltage) & 0xff00) << 8 | ((__uint32_t)(table->ACPILevel.MinVoltage) & 0xff0000 ) >> 8 | ((__uint32_t)(table->ACPILevel.MinVoltage) & 0xff000000) >> 24) : __swap32md(table->ACPILevel.MinVoltage )));  | 
| 1363 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl)((table->ACPILevel.CgSpllFuncCntl) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.CgSpllFuncCntl) ? (__uint32_t)(((__uint32_t )(table->ACPILevel.CgSpllFuncCntl) & 0xff) << 24 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl) & 0xff00 ) << 8 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl ) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel .CgSpllFuncCntl) & 0xff000000) >> 24) : __swap32md( table->ACPILevel.CgSpllFuncCntl)));  | 
| 1364 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2)((table->ACPILevel.CgSpllFuncCntl2) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.CgSpllFuncCntl2) ? (__uint32_t)(((__uint32_t )(table->ACPILevel.CgSpllFuncCntl2) & 0xff) << 24 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl2) & 0xff00 ) << 8 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl2 ) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel .CgSpllFuncCntl2) & 0xff000000) >> 24) : __swap32md (table->ACPILevel.CgSpllFuncCntl2)));  | 
| 1365 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3)((table->ACPILevel.CgSpllFuncCntl3) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.CgSpllFuncCntl3) ? (__uint32_t)(((__uint32_t )(table->ACPILevel.CgSpllFuncCntl3) & 0xff) << 24 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl3) & 0xff00 ) << 8 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl3 ) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel .CgSpllFuncCntl3) & 0xff000000) >> 24) : __swap32md (table->ACPILevel.CgSpllFuncCntl3)));  | 
| 1366 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4)((table->ACPILevel.CgSpllFuncCntl4) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.CgSpllFuncCntl4) ? (__uint32_t)(((__uint32_t )(table->ACPILevel.CgSpllFuncCntl4) & 0xff) << 24 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl4) & 0xff00 ) << 8 | ((__uint32_t)(table->ACPILevel.CgSpllFuncCntl4 ) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel .CgSpllFuncCntl4) & 0xff000000) >> 24) : __swap32md (table->ACPILevel.CgSpllFuncCntl4)));  | 
| 1367 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum)((table->ACPILevel.SpllSpreadSpectrum) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.SpllSpreadSpectrum) ? (__uint32_t)(((__uint32_t )(table->ACPILevel.SpllSpreadSpectrum) & 0xff) << 24 | ((__uint32_t)(table->ACPILevel.SpllSpreadSpectrum) & 0xff00) << 8 | ((__uint32_t)(table->ACPILevel.SpllSpreadSpectrum ) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel .SpllSpreadSpectrum) & 0xff000000) >> 24) : __swap32md (table->ACPILevel.SpllSpreadSpectrum)));  | 
| 1368 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2)((table->ACPILevel.SpllSpreadSpectrum2) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.SpllSpreadSpectrum2) ? (__uint32_t)(((__uint32_t )(table->ACPILevel.SpllSpreadSpectrum2) & 0xff) << 24 | ((__uint32_t)(table->ACPILevel.SpllSpreadSpectrum2) & 0xff00) << 8 | ((__uint32_t)(table->ACPILevel.SpllSpreadSpectrum2 ) & 0xff0000) >> 8 | ((__uint32_t)(table->ACPILevel .SpllSpreadSpectrum2) & 0xff000000) >> 24) : __swap32md (table->ACPILevel.SpllSpreadSpectrum2)));  | 
| 1369 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm)((table->ACPILevel.CcPwrDynRm) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.CcPwrDynRm) ? (__uint32_t)(((__uint32_t) (table->ACPILevel.CcPwrDynRm) & 0xff) << 24 | (( __uint32_t)(table->ACPILevel.CcPwrDynRm) & 0xff00) << 8 | ((__uint32_t)(table->ACPILevel.CcPwrDynRm) & 0xff0000 ) >> 8 | ((__uint32_t)(table->ACPILevel.CcPwrDynRm) & 0xff000000) >> 24) : __swap32md(table->ACPILevel.CcPwrDynRm )));  | 
| 1370 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1)((table->ACPILevel.CcPwrDynRm1) = (__uint32_t)(__builtin_constant_p (table->ACPILevel.CcPwrDynRm1) ? (__uint32_t)(((__uint32_t )(table->ACPILevel.CcPwrDynRm1) & 0xff) << 24 | ( (__uint32_t)(table->ACPILevel.CcPwrDynRm1) & 0xff00) << 8 | ((__uint32_t)(table->ACPILevel.CcPwrDynRm1) & 0xff0000 ) >> 8 | ((__uint32_t)(table->ACPILevel.CcPwrDynRm1) & 0xff000000) >> 24) : __swap32md(table->ACPILevel .CcPwrDynRm1)));  | 
| 1371 | |
| 1372 | if (!data->mclk_dpm_key_disabled) { | 
| 1373 | /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ | 
| 1374 | table->MemoryACPILevel.MclkFrequency = | 
| 1375 | data->dpm_table.mclk_table.dpm_levels[0].value; | 
| 1376 | result = fiji_get_dependency_volt_by_clk(hwmgr, | 
| 1377 | table_info->vdd_dep_on_mclk, | 
| 1378 | table->MemoryACPILevel.MclkFrequency, | 
| 1379 | (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd); | 
| 1380 | 		PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "Cannot find ACPI VDDCI voltage value in Clock Dependency Table" ); ; } } while (0)  | 
| 1381 | 				"Cannot find ACPI VDDCI voltage value in Clock Dependency Table",do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "Cannot find ACPI VDDCI voltage value in Clock Dependency Table" ); ; } } while (0)  | 
| 1382 | 				)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "Cannot find ACPI VDDCI voltage value in Clock Dependency Table" ); ; } } while (0);  | 
| 1383 | } else { | 
| 1384 | table->MemoryACPILevel.MclkFrequency = | 
| 1385 | data->vbios_boot_state.mclk_bootup_value; | 
| 1386 | table->MemoryACPILevel.MinVoltage = | 
| 1387 | data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE4; | 
| 1388 | } | 
| 1389 | |
| 1390 | us_mvdd = 0; | 
| 1391 | if ((SMU7_VOLTAGE_CONTROL_NONE0x0 == data->mvdd_control) || | 
| 1392 | (data->mclk_dpm_key_disabled)) | 
| 1393 | us_mvdd = data->vbios_boot_state.mvdd_bootup_value; | 
| 1394 | else { | 
| 1395 | if (!fiji_populate_mvdd_value(hwmgr, | 
| 1396 | data->dpm_table.mclk_table.dpm_levels[0].value, | 
| 1397 | &vol_level)) | 
| 1398 | us_mvdd = vol_level.Voltage; | 
| 1399 | } | 
| 1400 | |
| 1401 | table->MemoryACPILevel.MinMvdd = | 
| 1402 | 			PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE)(__uint32_t)(__builtin_constant_p(us_mvdd * 4) ? (__uint32_t) (((__uint32_t)(us_mvdd * 4) & 0xff) << 24 | ((__uint32_t )(us_mvdd * 4) & 0xff00) << 8 | ((__uint32_t)(us_mvdd * 4) & 0xff0000) >> 8 | ((__uint32_t)(us_mvdd * 4) & 0xff000000) >> 24) : __swap32md(us_mvdd * 4));  | 
| 1403 | |
| 1404 | table->MemoryACPILevel.EnabledForThrottle = 0; | 
| 1405 | table->MemoryACPILevel.EnabledForActivity = 0; | 
| 1406 | table->MemoryACPILevel.UpHyst = 0; | 
| 1407 | table->MemoryACPILevel.DownHyst = 100; | 
| 1408 | table->MemoryACPILevel.VoltageDownHyst = 0; | 
| 1409 | table->MemoryACPILevel.ActivityLevel = | 
| 1410 | 			PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity)(__uint16_t)(__builtin_constant_p(data->current_profile_setting .mclk_activity) ? (__uint16_t)(((__uint16_t)(data->current_profile_setting .mclk_activity) & 0xffU) << 8 | ((__uint16_t)(data-> current_profile_setting.mclk_activity) & 0xff00U) >> 8) : __swap16md(data->current_profile_setting.mclk_activity ));  | 
| 1411 | |
| 1412 | table->MemoryACPILevel.StutterEnable = false0; | 
| 1413 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency)((table->MemoryACPILevel.MclkFrequency) = (__uint32_t)(__builtin_constant_p (table->MemoryACPILevel.MclkFrequency) ? (__uint32_t)(((__uint32_t )(table->MemoryACPILevel.MclkFrequency) & 0xff) << 24 | ((__uint32_t)(table->MemoryACPILevel.MclkFrequency) & 0xff00) << 8 | ((__uint32_t)(table->MemoryACPILevel .MclkFrequency) & 0xff0000) >> 8 | ((__uint32_t)(table ->MemoryACPILevel.MclkFrequency) & 0xff000000) >> 24) : __swap32md(table->MemoryACPILevel.MclkFrequency)));  | 
| 1414 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage)((table->MemoryACPILevel.MinVoltage) = (__uint32_t)(__builtin_constant_p (table->MemoryACPILevel.MinVoltage) ? (__uint32_t)(((__uint32_t )(table->MemoryACPILevel.MinVoltage) & 0xff) << 24 | ((__uint32_t)(table->MemoryACPILevel.MinVoltage) & 0xff00 ) << 8 | ((__uint32_t)(table->MemoryACPILevel.MinVoltage ) & 0xff0000) >> 8 | ((__uint32_t)(table->MemoryACPILevel .MinVoltage) & 0xff000000) >> 24) : __swap32md(table ->MemoryACPILevel.MinVoltage)));  | 
| 1415 | |
| 1416 | return result; | 
| 1417 | } | 
| 1418 | |
| 1419 | static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr, | 
| 1420 | SMU73_Discrete_DpmTable *table) | 
| 1421 | { | 
| 1422 | int result = -EINVAL22; | 
| 1423 | uint8_t count; | 
| 1424 | struct pp_atomctrl_clock_dividers_vi dividers; | 
| 1425 | struct phm_ppt_v1_information *table_info = | 
| 1426 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 1427 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = | 
| 1428 | table_info->mm_dep_table; | 
| 1429 | |
| 1430 | table->VceLevelCount = (uint8_t)(mm_table->count); | 
| 1431 | table->VceBootLevel = 0; | 
| 1432 | |
| 1433 | for (count = 0; count < table->VceLevelCount; count++) { | 
| 1434 | table->VceLevel[count].Frequency = mm_table->entries[count].eclk; | 
| 1435 | table->VceLevel[count].MinVoltage = 0; | 
| 1436 | table->VceLevel[count].MinVoltage |= | 
| 1437 | (mm_table->entries[count].vddc * VOLTAGE_SCALE4) << VDDC_SHIFT0; | 
| 1438 | table->VceLevel[count].MinVoltage |= | 
| 1439 | ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA300) * | 
| 1440 | VOLTAGE_SCALE4) << VDDCI_SHIFT15; | 
| 1441 | table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT30; | 
| 1442 | |
| 1443 | /* retrieve the divider value from the VBIOS */ | 
| 1444 | result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, | 
| 1445 | table->VceLevel[count].Frequency, ÷rs); | 
| 1446 | 		PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for VCE engine clock"); return result ; } } while (0)  | 
| 1447 | 				"can not find divide id for VCE engine clock",do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for VCE engine clock"); return result ; } } while (0)  | 
| 1448 | 				return result)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for VCE engine clock"); return result ; } } while (0);  | 
| 1449 | |
| 1450 | table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; | 
| 1451 | |
| 1452 | 		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency)((table->VceLevel[count].Frequency) = (__uint32_t)(__builtin_constant_p (table->VceLevel[count].Frequency) ? (__uint32_t)(((__uint32_t )(table->VceLevel[count].Frequency) & 0xff) << 24 | ((__uint32_t)(table->VceLevel[count].Frequency) & 0xff00 ) << 8 | ((__uint32_t)(table->VceLevel[count].Frequency ) & 0xff0000) >> 8 | ((__uint32_t)(table->VceLevel [count].Frequency) & 0xff000000) >> 24) : __swap32md (table->VceLevel[count].Frequency)));  | 
| 1453 | 		CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage)((table->VceLevel[count].MinVoltage) = (__uint32_t)(__builtin_constant_p (table->VceLevel[count].MinVoltage) ? (__uint32_t)(((__uint32_t )(table->VceLevel[count].MinVoltage) & 0xff) << 24 | ((__uint32_t)(table->VceLevel[count].MinVoltage) & 0xff00 ) << 8 | ((__uint32_t)(table->VceLevel[count].MinVoltage ) & 0xff0000) >> 8 | ((__uint32_t)(table->VceLevel [count].MinVoltage) & 0xff000000) >> 24) : __swap32md (table->VceLevel[count].MinVoltage)));  | 
| 1454 | } | 
| 1455 | return result; | 
| 1456 | } | 
| 1457 | |
| 1458 | static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr, | 
| 1459 | SMU73_Discrete_DpmTable *table) | 
| 1460 | { | 
| 1461 | int result = -EINVAL22; | 
| 1462 | uint8_t count; | 
| 1463 | struct pp_atomctrl_clock_dividers_vi dividers; | 
| 1464 | struct phm_ppt_v1_information *table_info = | 
| 1465 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 1466 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = | 
| 1467 | table_info->mm_dep_table; | 
| 1468 | |
| 1469 | table->AcpLevelCount = (uint8_t)(mm_table->count); | 
| 1470 | table->AcpBootLevel = 0; | 
| 1471 | |
| 1472 | for (count = 0; count < table->AcpLevelCount; count++) { | 
| 1473 | table->AcpLevel[count].Frequency = mm_table->entries[count].aclk; | 
| 1474 | table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc * | 
| 1475 | VOLTAGE_SCALE4) << VDDC_SHIFT0; | 
| 1476 | table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - | 
| 1477 | VDDC_VDDCI_DELTA300) * VOLTAGE_SCALE4) << VDDCI_SHIFT15; | 
| 1478 | table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT30; | 
| 1479 | |
| 1480 | /* retrieve divider value for VBIOS */ | 
| 1481 | result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, | 
| 1482 | table->AcpLevel[count].Frequency, ÷rs); | 
| 1483 | 		PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for engine clock"); return result; } } while (0)  | 
| 1484 | 				"can not find divide id for engine clock", return result)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for engine clock"); return result; } } while (0);  | 
| 1485 | |
| 1486 | table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; | 
| 1487 | |
| 1488 | 		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency)((table->AcpLevel[count].Frequency) = (__uint32_t)(__builtin_constant_p (table->AcpLevel[count].Frequency) ? (__uint32_t)(((__uint32_t )(table->AcpLevel[count].Frequency) & 0xff) << 24 | ((__uint32_t)(table->AcpLevel[count].Frequency) & 0xff00 ) << 8 | ((__uint32_t)(table->AcpLevel[count].Frequency ) & 0xff0000) >> 8 | ((__uint32_t)(table->AcpLevel [count].Frequency) & 0xff000000) >> 24) : __swap32md (table->AcpLevel[count].Frequency)));  | 
| 1489 | 		CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage)((table->AcpLevel[count].MinVoltage) = (__uint32_t)(__builtin_constant_p (table->AcpLevel[count].MinVoltage) ? (__uint32_t)(((__uint32_t )(table->AcpLevel[count].MinVoltage) & 0xff) << 24 | ((__uint32_t)(table->AcpLevel[count].MinVoltage) & 0xff00 ) << 8 | ((__uint32_t)(table->AcpLevel[count].MinVoltage ) & 0xff0000) >> 8 | ((__uint32_t)(table->AcpLevel [count].MinVoltage) & 0xff000000) >> 24) : __swap32md (table->AcpLevel[count].MinVoltage)));  | 
| 1490 | } | 
| 1491 | return result; | 
| 1492 | } | 
| 1493 | |
| 1494 | static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, | 
| 1495 | int32_t eng_clock, int32_t mem_clock, | 
| 1496 | struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs) | 
| 1497 | { | 
| 1498 | uint32_t dram_timing; | 
| 1499 | uint32_t dram_timing2; | 
| 1500 | uint32_t burstTime; | 
| 1501 | ULONG trrds, trrdl; | 
| 1502 | int result; | 
| 1503 | |
| 1504 | result = atomctrl_set_engine_dram_timings_rv770(hwmgr, | 
| 1505 | eng_clock, mem_clock); | 
| 1506 | 	PP_ASSERT_WITH_CODE(result == 0,do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error calling VBIOS to set DRAM_TIMING." ); return result; } } while (0)  | 
| 1507 | 			"Error calling VBIOS to set DRAM_TIMING.", return result)do { if (!(result == 0)) { printk("\0014" "amdgpu: " "%s\n", "Error calling VBIOS to set DRAM_TIMING." ); return result; } } while (0);  | 
| 1508 | |
| 1509 | 	dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING)(((struct cgs_device *)hwmgr->device)->ops->read_register (hwmgr->device,0x9dd));  | 
| 1510 | 	dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2)(((struct cgs_device *)hwmgr->device)->ops->read_register (hwmgr->device,0x9de));  | 
| 1511 | 	burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME)(((struct cgs_device *)hwmgr->device)->ops->read_register (hwmgr->device,0xa02));  | 
| 1512 | |
| 1513 | trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0)(((burstTime) & 0x7c00) >> 0xa); | 
| 1514 | trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0)(((burstTime) & 0x1f00000) >> 0x14); | 
| 1515 | |
| 1516 | 	arb_regs->McArbDramTiming  = PP_HOST_TO_SMC_UL(dram_timing)(__uint32_t)(__builtin_constant_p(dram_timing) ? (__uint32_t) (((__uint32_t)(dram_timing) & 0xff) << 24 | ((__uint32_t )(dram_timing) & 0xff00) << 8 | ((__uint32_t)(dram_timing ) & 0xff0000) >> 8 | ((__uint32_t)(dram_timing) & 0xff000000) >> 24) : __swap32md(dram_timing));  | 
| 1517 | 	arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2)(__uint32_t)(__builtin_constant_p(dram_timing2) ? (__uint32_t )(((__uint32_t)(dram_timing2) & 0xff) << 24 | ((__uint32_t )(dram_timing2) & 0xff00) << 8 | ((__uint32_t)(dram_timing2 ) & 0xff0000) >> 8 | ((__uint32_t)(dram_timing2) & 0xff000000) >> 24) : __swap32md(dram_timing2));  | 
| 1518 | arb_regs->McArbBurstTime = (uint8_t)burstTime; | 
| 1519 | arb_regs->TRRDS = (uint8_t)trrds; | 
| 1520 | arb_regs->TRRDL = (uint8_t)trrdl; | 
| 1521 | |
| 1522 | return 0; | 
| 1523 | } | 
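
The expanded CONVERT_FROM_HOST_TO_SMC_UL / PP_HOST_TO_SMC_UL macros above boil down to a 32-bit byte swap toward the SMC's big-endian layout. A minimal standalone sketch of that conversion, using an illustrative value rather than real register contents:

#include <stdint.h>
#include <stdio.h>

/* Swap a 32-bit value between host (little-endian) and SMC (big-endian)
 * byte order; same effect as the expanded __swap32md() path above. */
static uint32_t host_to_smc_ul(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) |
	       ((v & 0x0000ff00u) <<  8) |
	       ((v & 0x00ff0000u) >>  8) |
	       ((v & 0xff000000u) >> 24);
}

int main(void)
{
	uint32_t dram_timing = 0x12345678u;	/* illustrative only */

	/* 0x12345678 is handed to the SMC as 0x78563412. */
	printf("0x%08x -> 0x%08x\n", dram_timing, host_to_smc_ul(dram_timing));
	return 0;
}
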
| 1524 | |
| 1525 | static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) | 
| 1526 | { | 
| 1527 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 1528 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 1529 | struct SMU73_Discrete_MCArbDramTimingTable arb_regs; | 
| 1530 | uint32_t i, j; | 
| 1531 | int result = 0; | 
| 1532 | |
| 1533 | for (i = 0; i < data->dpm_table.sclk_table.count; i++) { | 
| 1534 | for (j = 0; j < data->dpm_table.mclk_table.count; j++) { | 
| 1535 | result = fiji_populate_memory_timing_parameters(hwmgr, | 
| 1536 | data->dpm_table.sclk_table.dpm_levels[i].value, | 
| 1537 | data->dpm_table.mclk_table.dpm_levels[j].value, | 
| 1538 | &arb_regs.entries[i][j]); | 
| 1539 | if (result) | 
| 1540 | break; | 
| 1541 | } | 
| 1542 | } | 
| 1543 | |
| 1544 | if (!result) | 
| 1545 | result = smu7_copy_bytes_to_smc( | 
| 1546 | hwmgr, | 
| 1547 | smu_data->smu7_data.arb_table_start, | 
| 1548 | (uint8_t *)&arb_regs, | 
| 1549 | sizeof(SMU73_Discrete_MCArbDramTimingTable), | 
| 1550 | SMC_RAM_END0x40000); | 
| 1551 | return result; | 
| 1552 | } | 
| 1553 | |
| 1554 | static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, | 
| 1555 | struct SMU73_Discrete_DpmTable *table) | 
| 1556 | { | 
| 1557 | int result = -EINVAL22; | 
| 1558 | uint8_t count; | 
| 1559 | struct pp_atomctrl_clock_dividers_vi dividers; | 
| 1560 | struct phm_ppt_v1_information *table_info = | 
| 1561 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 1562 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = | 
| 1563 | table_info->mm_dep_table; | 
| 1564 | |
| 1565 | table->UvdLevelCount = (uint8_t)(mm_table->count); | 
| 1566 | table->UvdBootLevel = 0; | 
| 1567 | |
| 1568 | for (count = 0; count < table->UvdLevelCount; count++) { | 
| 1569 | table->UvdLevel[count].MinVoltage = 0; | 
| 1570 | table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; | 
| 1571 | table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; | 
| 1572 | table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * | 
| 1573 | VOLTAGE_SCALE4) << VDDC_SHIFT0; | 
| 1574 | table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - | 
| 1575 | VDDC_VDDCI_DELTA300) * VOLTAGE_SCALE4) << VDDCI_SHIFT15; | 
| 1576 | table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT30; | 
| 1577 | |
| 1578 | /* retrieve divider value for VBIOS */ | 
| 1579 | result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, | 
| 1580 | table->UvdLevel[count].VclkFrequency, ÷rs); | 
| 1581 | 		PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for Vclk clock"); return result; } } while (0)  | 
| 1582 | 				"can not find divide id for Vclk clock", return result)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for Vclk clock"); return result; } } while (0);  | 
| 1583 | |
| 1584 | table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; | 
| 1585 | |
| 1586 | result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, | 
| 1587 | table->UvdLevel[count].DclkFrequency, ÷rs); | 
| 1588 | 		PP_ASSERT_WITH_CODE((0 == result),do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for Dclk clock"); return result; } } while (0)  | 
| 1589 | 				"can not find divide id for Dclk clock", return result)do { if (!((0 == result))) { printk("\0014" "amdgpu: " "%s\n" , "can not find divide id for Dclk clock"); return result; } } while (0);  | 
| 1590 | |
| 1591 | table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; | 
| 1592 | |
| 1593 | 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency)((table->UvdLevel[count].VclkFrequency) = (__uint32_t)(__builtin_constant_p (table->UvdLevel[count].VclkFrequency) ? (__uint32_t)(((__uint32_t )(table->UvdLevel[count].VclkFrequency) & 0xff) << 24 | ((__uint32_t)(table->UvdLevel[count].VclkFrequency) & 0xff00) << 8 | ((__uint32_t)(table->UvdLevel[count] .VclkFrequency) & 0xff0000) >> 8 | ((__uint32_t)(table ->UvdLevel[count].VclkFrequency) & 0xff000000) >> 24) : __swap32md(table->UvdLevel[count].VclkFrequency)));  | 
| 1594 | 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency)((table->UvdLevel[count].DclkFrequency) = (__uint32_t)(__builtin_constant_p (table->UvdLevel[count].DclkFrequency) ? (__uint32_t)(((__uint32_t )(table->UvdLevel[count].DclkFrequency) & 0xff) << 24 | ((__uint32_t)(table->UvdLevel[count].DclkFrequency) & 0xff00) << 8 | ((__uint32_t)(table->UvdLevel[count] .DclkFrequency) & 0xff0000) >> 8 | ((__uint32_t)(table ->UvdLevel[count].DclkFrequency) & 0xff000000) >> 24) : __swap32md(table->UvdLevel[count].DclkFrequency)));  | 
| 1595 | 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage)((table->UvdLevel[count].MinVoltage) = (__uint32_t)(__builtin_constant_p (table->UvdLevel[count].MinVoltage) ? (__uint32_t)(((__uint32_t )(table->UvdLevel[count].MinVoltage) & 0xff) << 24 | ((__uint32_t)(table->UvdLevel[count].MinVoltage) & 0xff00 ) << 8 | ((__uint32_t)(table->UvdLevel[count].MinVoltage ) & 0xff0000) >> 8 | ((__uint32_t)(table->UvdLevel [count].MinVoltage) & 0xff000000) >> 24) : __swap32md (table->UvdLevel[count].MinVoltage)));  | 
| 1596 | |
| 1597 | } | 
| 1598 | return result; | 
| 1599 | } | 
| 1600 | |
| 1601 | static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr, | 
| 1602 | struct SMU73_Discrete_DpmTable *table) | 
| 1603 | { | 
| 1604 | int result = 0; | 
| 1605 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 1606 | |
| 1607 | table->GraphicsBootLevel = 0; | 
| 1608 | table->MemoryBootLevel = 0; | 
| 1609 | |
| 1610 | /* find boot level from dpm table */ | 
| 1611 | result = phm_find_boot_level(&(data->dpm_table.sclk_table), | 
| 1612 | data->vbios_boot_state.sclk_bootup_value, | 
| 1613 | (uint32_t *)&(table->GraphicsBootLevel)); | 
| 1614 | |
| 1615 | result = phm_find_boot_level(&(data->dpm_table.mclk_table), | 
Value stored to 'result' is never read  | |
| 1616 | data->vbios_boot_state.mclk_bootup_value, | 
| 1617 | (uint32_t *)&(table->MemoryBootLevel)); | 
| 1618 | |
| 1619 | table->BootVddc = data->vbios_boot_state.vddc_bootup_value * | 
| 1620 | VOLTAGE_SCALE4; | 
| 1621 | table->BootVddci = data->vbios_boot_state.vddci_bootup_value * | 
| 1622 | VOLTAGE_SCALE4; | 
| 1623 | table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * | 
| 1624 | VOLTAGE_SCALE4; | 
| 1625 | |
| 1626 | 	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc)((table->BootVddc) = (__uint16_t)(__builtin_constant_p(table ->BootVddc) ? (__uint16_t)(((__uint16_t)(table->BootVddc ) & 0xffU) << 8 | ((__uint16_t)(table->BootVddc) & 0xff00U) >> 8) : __swap16md(table->BootVddc)) );  | 
| 1627 | 	CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci)((table->BootVddci) = (__uint16_t)(__builtin_constant_p(table ->BootVddci) ? (__uint16_t)(((__uint16_t)(table->BootVddci ) & 0xffU) << 8 | ((__uint16_t)(table->BootVddci ) & 0xff00U) >> 8) : __swap16md(table->BootVddci )));  | 
| 1628 | 	CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd)((table->BootMVdd) = (__uint16_t)(__builtin_constant_p(table ->BootMVdd) ? (__uint16_t)(((__uint16_t)(table->BootMVdd ) & 0xffU) << 8 | ((__uint16_t)(table->BootMVdd) & 0xff00U) >> 8) : __swap16md(table->BootMVdd)) );  | 
| 1629 | |
| 1630 | return 0; | 
| 1631 | } | 
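
The warning this report was generated for points at the second phm_find_boot_level() call above: the value it stores into result overwrites the first one and is never examined, because the function unconditionally returns 0. A hedged sketch of one way such a dead store can be resolved, written against a stub lookup rather than the driver's types (an illustration, not the upstream fix):

#include <stdint.h>
#include <stdio.h>

/* Stub standing in for phm_find_boot_level(): returns 0 on success and
 * writes the matching level index, non-zero when the value is not found. */
static int find_boot_level_stub(const uint32_t *levels, uint32_t count,
				uint32_t value, uint32_t *level)
{
	for (uint32_t i = 0; i < count; i++) {
		if (levels[i] == value) {
			*level = i;
			return 0;
		}
	}
	return -22;	/* -EINVAL */
}

int main(void)
{
	uint32_t sclk_levels[] = { 30000, 60000, 100000 };	/* made-up table */
	uint32_t boot_level = 0;	/* default boot level, as in the driver */

	/* Check the result instead of letting it be silently overwritten. */
	if (find_boot_level_stub(sclk_levels, 3, 60000, &boot_level))
		printf("boot sclk not found in dpm table, keeping level 0\n");
	printf("GraphicsBootLevel = %u\n", boot_level);
	return 0;
}
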
| 1632 | |
| 1633 | static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) | 
| 1634 | { | 
| 1635 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 1636 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 1637 | struct phm_ppt_v1_information *table_info = | 
| 1638 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 1639 | uint8_t count, level; | 
| 1640 | |
| 1641 | count = (uint8_t)(table_info->vdd_dep_on_sclk->count); | 
| 1642 | for (level = 0; level < count; level++) { | 
| 1643 | if (table_info->vdd_dep_on_sclk->entries[level].clk >= | 
| 1644 | data->vbios_boot_state.sclk_bootup_value) { | 
| 1645 | smu_data->smc_state_table.GraphicsBootLevel = level; | 
| 1646 | break; | 
| 1647 | } | 
| 1648 | } | 
| 1649 | |
| 1650 | count = (uint8_t)(table_info->vdd_dep_on_mclk->count); | 
| 1651 | for (level = 0; level < count; level++) { | 
| 1652 | if (table_info->vdd_dep_on_mclk->entries[level].clk >= | 
| 1653 | data->vbios_boot_state.mclk_bootup_value) { | 
| 1654 | smu_data->smc_state_table.MemoryBootLevel = level; | 
| 1655 | break; | 
| 1656 | } | 
| 1657 | } | 
| 1658 | |
| 1659 | return 0; | 
| 1660 | } | 
| 1661 | |
| 1662 | static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) | 
| 1663 | { | 
| 1664 | uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, | 
| 1665 | volt_with_cks, value; | 
| 1666 | uint16_t clock_freq_u16; | 
| 1667 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 1668 | uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, | 
| 1669 | volt_offset = 0; | 
| 1670 | struct phm_ppt_v1_information *table_info = | 
| 1671 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 1672 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = | 
| 1673 | table_info->vdd_dep_on_sclk; | 
| 1674 | |
| 1675 | stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; | 
| 1676 | |
| 1677 | 	/* Read SMU_EFUSE to calculate RO and determine | 
| 1678 | 	 * whether the part is SS or FF. If RO >= 1660 MHz, the part is FF. | 
| 1679 | */ | 
| 1680 | 	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (146 * 4)))  | 
| 1681 | 			ixSMU_EFUSE_0 + (146 * 4))(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (146 * 4)));  | 
| 1682 | 	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (148 * 4)))  | 
| 1683 | 			ixSMU_EFUSE_0 + (148 * 4))(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0100000 + (148 * 4)));  | 
| 1684 | efuse &= 0xFF000000; | 
| 1685 | efuse = efuse >> 24; | 
| 1686 | efuse2 &= 0xF; | 
| 1687 | |
| 1688 | if (efuse2 == 1) | 
| 1689 | ro = (2300 - 1350) * efuse / 255 + 1350; | 
| 1690 | else | 
| 1691 | ro = (2500 - 1000) * efuse / 255 + 1000; | 
| 1692 | |
| 1693 | if (ro >= 1660) | 
| 1694 | type = 0; | 
| 1695 | else | 
| 1696 | type = 1; | 
| 1697 | |
| 1698 | /* Populate Stretch amount */ | 
| 1699 | smu_data->smc_state_table.ClockStretcherAmount = stretch_amount; | 
| 1700 | |
| 1701 | /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ | 
| 1702 | for (i = 0; i < sclk_table->count; i++) { | 
| 1703 | smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= | 
| 1704 | sclk_table->entries[i].cks_enable << i; | 
| 1705 | volt_without_cks = (uint32_t)((14041 * | 
| 1706 | (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / | 
| 1707 | (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); | 
| 1708 | volt_with_cks = (uint32_t)((13946 * | 
| 1709 | (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / | 
| 1710 | (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); | 
| 1711 | if (volt_without_cks >= volt_with_cks) | 
| 1712 | volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + | 
| 1713 | sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); | 
| 1714 | smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; | 
| 1715 | } | 
| 1716 | |
| 1717 | 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x1) | (0x1 & ((0x0) << 0x0)))))  | 
| 1718 | 			STRETCH_ENABLE, 0x0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x1) | (0x1 & ((0x0) << 0x0)))));  | 
| 1719 | 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 & ((0x1) << 0x1)))))  | 
| 1720 | 			masterReset, 0x1)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 & ((0x1) << 0x1)))));  | 
| 1721 | 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x4) | (0x4 & ((0x1) << 0x2)))))  | 
| 1722 | 			staticEnable, 0x1)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x4) | (0x4 & ((0x1) << 0x2)))));  | 
| 1723 | 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 & ((0x0) << 0x1)))))  | 
| 1724 | 			masterReset, 0x0)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020034c,((((((struct cgs_device *)hwmgr->device)->ops->read_ind_register(hwmgr-> device,CGS_IND_REG__SMC,0xc020034c))) & ~0x2) | (0x2 & ((0x0) << 0x1)))));  | 
| 1725 | |
| 1726 | /* Populate CKS Lookup Table */ | 
| 1727 | if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) | 
| 1728 | stretch_amount2 = 0; | 
| 1729 | else if (stretch_amount == 3 || stretch_amount == 4) | 
| 1730 | stretch_amount2 = 1; | 
| 1731 | else { | 
| 1732 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | 
| 1733 | PHM_PlatformCaps_ClockStretcher); | 
| 1734 | 		PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Stretch Amount in PPTable not supported" ); return -22; } } while (0)  | 
| 1735 | 				"Stretch Amount in PPTable not supported",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Stretch Amount in PPTable not supported" ); return -22; } } while (0)  | 
| 1736 | 				return -EINVAL)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "Stretch Amount in PPTable not supported" ); return -22; } } while (0);  | 
| 1737 | } | 
| 1738 | |
| 1739 | 	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350))  | 
| 1740 | 			ixPWR_CKS_CNTL)(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350));  | 
| 1741 | value &= 0xFFC2FF87; | 
| 1742 | smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = | 
| 1743 | fiji_clock_stretcher_lookup_table[stretch_amount2][0]; | 
| 1744 | smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = | 
| 1745 | fiji_clock_stretcher_lookup_table[stretch_amount2][1]; | 
| 1746 | 	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) ? (__uint32_t)(((__uint32_t)(smu_data-> smc_state_table. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff) << 24 | ((__uint32_t) (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff00) << 8 | ((__uint32_t)(smu_data->smc_state_table. GraphicsLevel [smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff000000) >> 24) : __swap32md (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency))  | 
| 1747 | 			GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) ? (__uint32_t)(((__uint32_t)(smu_data-> smc_state_table. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff) << 24 | ((__uint32_t) (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff00) << 8 | ((__uint32_t)(smu_data->smc_state_table. GraphicsLevel [smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff000000) >> 24) : __swap32md (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency))  | 
| 1748 | 			SclkFrequency)(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) ? (__uint32_t)(((__uint32_t)(smu_data-> smc_state_table. GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff) << 24 | ((__uint32_t) (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff00) << 8 | ((__uint32_t)(smu_data->smc_state_table. GraphicsLevel [smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table . GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1]. SclkFrequency) & 0xff000000) >> 24) : __swap32md (smu_data->smc_state_table. GraphicsLevel[smu_data->smc_state_table .GraphicsDpmLevelCount - 1]. SclkFrequency)) / 100);  | 
| 1749 | if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] < | 
| 1750 | clock_freq_u16 && | 
| 1751 | fiji_clock_stretcher_lookup_table[stretch_amount2][1] > | 
| 1752 | clock_freq_u16) { | 
| 1753 | /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ | 
| 1754 | value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; | 
| 1755 | /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ | 
| 1756 | value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; | 
| 1757 | /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ | 
| 1758 | value |= (fiji_clock_stretch_amount_conversion | 
| 1759 | [fiji_clock_stretcher_lookup_table[stretch_amount2][3]] | 
| 1760 | [stretch_amount]) << 3; | 
| 1761 | } | 
| 1762 | 	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].minFreq) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].minFreq ) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable . CKS_LOOKUPTableEntry[0].minFreq) & 0xffU) << 8 | ( (__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].minFreq) & 0xff00U) >> 8) : __swap16md(smu_data ->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0] .minFreq)))  | 
| 1763 | 			CKS_LOOKUPTableEntry[0].minFreq)((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].minFreq) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].minFreq ) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable . CKS_LOOKUPTableEntry[0].minFreq) & 0xffU) << 8 | ( (__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].minFreq) & 0xff00U) >> 8) : __swap16md(smu_data ->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0] .minFreq)));  | 
| 1764 | 	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].maxFreq) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].maxFreq ) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable . CKS_LOOKUPTableEntry[0].maxFreq) & 0xffU) << 8 | ( (__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].maxFreq) & 0xff00U) >> 8) : __swap16md(smu_data ->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0] .maxFreq)))  | 
| 1765 | 			CKS_LOOKUPTableEntry[0].maxFreq)((smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].maxFreq) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0].maxFreq ) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable . CKS_LOOKUPTableEntry[0].maxFreq) & 0xffU) << 8 | ( (__uint16_t)(smu_data->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry [0].maxFreq) & 0xff00U) >> 8) : __swap16md(smu_data ->smc_state_table.CKS_LOOKUPTable. CKS_LOOKUPTableEntry[0] .maxFreq)));  | 
| 1766 | smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = | 
| 1767 | fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; | 
| 1768 | smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= | 
| 1769 | (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; | 
| 1770 | |
| 1771 | 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350,value))  | 
| 1772 | 			ixPWR_CKS_CNTL, value)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350,value));  | 
| 1773 | |
| 1774 | /* Populate DDT Lookup Table */ | 
| 1775 | for (i = 0; i < 4; i++) { | 
| 1776 | /* Assign the minimum and maximum VID stored | 
| 1777 | * in the last row of Clock Stretcher Voltage Table. | 
| 1778 | */ | 
| 1779 | smu_data->smc_state_table.ClockStretcherDataTable. | 
| 1780 | ClockStretcherDataTableEntry[i].minVID = | 
| 1781 | (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2]; | 
| 1782 | smu_data->smc_state_table.ClockStretcherDataTable. | 
| 1783 | ClockStretcherDataTableEntry[i].maxVID = | 
| 1784 | (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3]; | 
| 1785 | /* Loop through each SCLK and check the frequency | 
| 1786 | * to see if it lies within the frequency for clock stretcher. | 
| 1787 | */ | 
| 1788 | for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) { | 
| 1789 | cks_setting = 0; | 
| 1790 | 			clock_freq = PP_SMC_TO_HOST_UL((__uint32_t)(__builtin_constant_p(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) ? (__uint32_t)(((__uint32_t) (smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency) & 0xff) << 24 | ((__uint32_t)(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) & 0xff00) << 8 | ( (__uint32_t)(smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) & 0xff000000) >> 24 ) : __swap32md(smu_data->smc_state_table.GraphicsLevel[j]. SclkFrequency))  | 
| 1791 | 					smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency)(__uint32_t)(__builtin_constant_p(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) ? (__uint32_t)(((__uint32_t) (smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency) & 0xff) << 24 | ((__uint32_t)(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) & 0xff00) << 8 | ( (__uint32_t)(smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency ) & 0xff0000) >> 8 | ((__uint32_t)(smu_data->smc_state_table .GraphicsLevel[j].SclkFrequency) & 0xff000000) >> 24 ) : __swap32md(smu_data->smc_state_table.GraphicsLevel[j]. SclkFrequency));  | 
| 1792 | /* Check the allowed frequency against the sclk level[j]. | 
| 1793 | * Sclk's endianness has already been converted, | 
| 1794 | 			 * and it's in 10 kHz units, | 
| 1795 | 			 * as opposed to the data table, which is in MHz units. | 
| 1796 | */ | 
| 1797 | if (clock_freq >= | 
| 1798 | (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) { | 
| 1799 | cks_setting |= 0x2; | 
| 1800 | if (clock_freq < | 
| 1801 | (fiji_clock_stretcher_ddt_table[type][i][1]) * 100) | 
| 1802 | cks_setting |= 0x1; | 
| 1803 | } | 
| 1804 | smu_data->smc_state_table.ClockStretcherDataTable. | 
| 1805 | ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2); | 
| 1806 | } | 
| 1807 | 		CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.((smu_data->smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xffU) << 8 | ((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xff00U) >> 8) : __swap16md(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting )))  | 
| 1808 | 				ClockStretcherDataTable.((smu_data->smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xffU) << 8 | ((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xff00U) >> 8) : __swap16md(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting )))  | 
| 1809 | 				ClockStretcherDataTableEntry[i].setting)((smu_data->smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) = (__uint16_t)(__builtin_constant_p(smu_data-> smc_state_table. ClockStretcherDataTable. ClockStretcherDataTableEntry [i].setting) ? (__uint16_t)(((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xffU) << 8 | ((__uint16_t)(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting ) & 0xff00U) >> 8) : __swap16md(smu_data->smc_state_table . ClockStretcherDataTable. ClockStretcherDataTableEntry[i].setting )));  | 
| 1810 | } | 
| 1811 | |
| 1812 | 	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL)(((struct cgs_device *)hwmgr->device)->ops->read_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350));  | 
| 1813 | value &= 0xFFFFFFFE; | 
| 1814 | 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc0200350,value));  | 
| 1815 | |
| 1816 | return 0; | 
| 1817 | } | 
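
The in-loop comment above carries the important detail: GraphicsLevel[].SclkFrequency is held in 10 kHz units while the clock-stretcher data table is in MHz, so the MHz bounds are multiplied by 100 before the comparison. A standalone re-run of that check with made-up bounds (not the real table rows):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sclk_10khz = 60000;	/* 600 MHz expressed in 10 kHz units */
	uint32_t floor_mhz = 400, boundary_mhz = 800;	/* illustrative bounds */
	uint8_t cks_setting = 0;

	/* MHz * 100 == 10 kHz units, so both sides use the same scale. */
	if (sclk_10khz >= floor_mhz * 100) {
		cks_setting |= 0x2;
		if (sclk_10khz < boundary_mhz * 100)
			cks_setting |= 0x1;
	}
	printf("cks_setting = 0x%x\n", cks_setting);	/* prints 0x3 here */
	return 0;
}
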
| 1818 | |
| 1819 | static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, | 
| 1820 | struct SMU73_Discrete_DpmTable *table) | 
| 1821 | { | 
| 1822 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 1823 | uint16_t config; | 
| 1824 | |
| 1825 | config = VR_MERGED_WITH_VDDC0; | 
| 1826 | table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT8); | 
| 1827 | |
| 1828 | /* Set Vddc Voltage Controller */ | 
| 1829 | if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->voltage_control) { | 
| 1830 | config = VR_SVI2_PLANE_11; | 
| 1831 | table->VRConfig |= config; | 
| 1832 | } else { | 
| 1833 | 		PP_ASSERT_WITH_CODE(false,do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "VDDC should be on SVI2 control in merged mode!" ); ; } } while (0)  | 
| 1834 | 				"VDDC should be on SVI2 control in merged mode!",do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "VDDC should be on SVI2 control in merged mode!" ); ; } } while (0)  | 
| 1835 | 				)do { if (!(0)) { printk("\0014" "amdgpu: " "%s\n", "VDDC should be on SVI2 control in merged mode!" ); ; } } while (0);  | 
| 1836 | } | 
| 1837 | /* Set Vddci Voltage Controller */ | 
| 1838 | if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->vddci_control) { | 
| 1839 | config = VR_SVI2_PLANE_22; /* only in merged mode */ | 
| 1840 | table->VRConfig |= (config << VRCONF_VDDCI_SHIFT16); | 
| 1841 | } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO0x1 == data->vddci_control) { | 
| 1842 | config = VR_SMIO_PATTERN_13; | 
| 1843 | table->VRConfig |= (config << VRCONF_VDDCI_SHIFT16); | 
| 1844 | } else { | 
| 1845 | config = VR_STATIC_VOLTAGE5; | 
| 1846 | table->VRConfig |= (config << VRCONF_VDDCI_SHIFT16); | 
| 1847 | } | 
| 1848 | /* Set Mvdd Voltage Controller */ | 
| 1849 | if (SMU7_VOLTAGE_CONTROL_BY_SVID20x2 == data->mvdd_control) { | 
| 1850 | config = VR_SVI2_PLANE_22; | 
| 1851 | table->VRConfig |= (config << VRCONF_MVDD_SHIFT24); | 
| 1852 | } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO0x1 == data->mvdd_control) { | 
| 1853 | config = VR_SMIO_PATTERN_24; | 
| 1854 | table->VRConfig |= (config << VRCONF_MVDD_SHIFT24); | 
| 1855 | } else { | 
| 1856 | config = VR_STATIC_VOLTAGE5; | 
| 1857 | table->VRConfig |= (config << VRCONF_MVDD_SHIFT24); | 
| 1858 | } | 
| 1859 | |
| 1860 | return 0; | 
| 1861 | } | 
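
fiji_populate_vr_config() packs one regulator selection per byte of VRConfig: VDDC in bits 0-7, VDDGFX at shift 8, VDDCI at shift 16 and MVDD at shift 24, with the VR_* values visible in the expanded constants above. A compact sketch of that packing using the same shifts and one plausible combination of selections:

#include <stdint.h>
#include <stdio.h>

/* Shift values as they appear in the expanded VRCONF_* constants. */
#define VRCONF_VDDGFX_SHIFT	8
#define VRCONF_VDDCI_SHIFT	16
#define VRCONF_MVDD_SHIFT	24

int main(void)
{
	uint32_t vr_config = 0;
	uint16_t vddc  = 1;	/* VR_SVI2_PLANE_1 */
	uint16_t vddci = 2;	/* VR_SVI2_PLANE_2, merged mode */
	uint16_t mvdd  = 5;	/* VR_STATIC_VOLTAGE */

	vr_config |= vddc;				/* bits 0-7 */
	vr_config |= 0u << VRCONF_VDDGFX_SHIFT;		/* VR_MERGED_WITH_VDDC */
	vr_config |= (uint32_t)vddci << VRCONF_VDDCI_SHIFT;
	vr_config |= (uint32_t)mvdd << VRCONF_MVDD_SHIFT;

	printf("VRConfig = 0x%08x\n", vr_config);	/* 0x05020001 */
	return 0;
}
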
| 1862 | |
| 1863 | static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr) | 
| 1864 | { | 
| 1865 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 1866 | uint32_t tmp; | 
| 1867 | int result; | 
| 1868 | |
| 1869 | /* This is a read-modify-write on the first byte of the ARB table. | 
| 1870 | * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure | 
| 1871 | * is the field 'current'. | 
| 1872 | 	 * This solution is ugly, but we never write the whole table, only | 
| 1873 | * individual fields in it. | 
| 1874 | * In reality this field should not be in that structure | 
| 1875 | * but in a soft register. | 
| 1876 | */ | 
| 1877 | result = smu7_read_smc_sram_dword(hwmgr, | 
| 1878 | smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END0x40000); | 
| 1879 | |
| 1880 | if (result) | 
| 1881 | return result; | 
| 1882 | |
| 1883 | tmp &= 0x00FFFFFF; | 
| 1884 | tmp |= ((uint32_t)MC_CG_ARB_FREQ_F10x0b) << 24; | 
| 1885 | |
| 1886 | return smu7_write_smc_sram_dword(hwmgr, | 
| 1887 | smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END0x40000); | 
| 1888 | } | 
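
The comment above explains why only the top byte of the first ARB-table dword is rewritten: that byte is the 'current' field and the remaining bytes must survive the read-modify-write. The masking itself is simple; a standalone illustration with an arbitrary starting value:

#include <stdint.h>
#include <stdio.h>

#define MC_CG_ARB_FREQ_F1	0x0b

int main(void)
{
	uint32_t tmp = 0xa5123456u;	/* pretend this was read from SMC RAM */

	tmp &= 0x00FFFFFF;				/* keep the low three bytes */
	tmp |= (uint32_t)MC_CG_ARB_FREQ_F1 << 24;	/* overwrite only 'current' */

	printf("arb dword = 0x%08x\n", tmp);		/* prints 0x0b123456 */
	return 0;
}
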
| 1889 | |
| 1890 | static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr) | 
| 1891 | { | 
| 1892 | pp_atomctrl_voltage_table param_led_dpm; | 
| 1893 | int result = 0; | 
| 1894 | u32 mask = 0; | 
| 1895 | |
| 1896 | result = atomctrl_get_voltage_table_v3(hwmgr, | 
| 1897 | VOLTAGE_TYPE_LEDDPM8, VOLTAGE_OBJ_GPIO_LUT0, | 
| 1898 | ¶m_led_dpm); | 
| 1899 | if (result == 0) { | 
| 1900 | int i, j; | 
| 1901 | u32 tmp = param_led_dpm.mask_low; | 
| 1902 | |
| 1903 | for (i = 0, j = 0; i < 32; i++) { | 
| 1904 | if (tmp & 1) { | 
| 1905 | mask |= (i << (8 * j)); | 
| 1906 | if (++j >= 3) | 
| 1907 | break; | 
| 1908 | } | 
| 1909 | tmp >>= 1; | 
| 1910 | } | 
| 1911 | } | 
| 1912 | if (mask) | 
| 1913 | smum_send_msg_to_smc_with_parameter(hwmgr, | 
| 1914 | PPSMC_MSG_LedConfig((uint16_t) 0x274), | 
| 1915 | mask, | 
| 1916 | NULL((void *)0)); | 
| 1917 | return 0; | 
| 1918 | } | 
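
fiji_setup_dpm_led_config() walks the 32-bit GPIO mask returned by the VBIOS and records the positions of the first three set bits, one pin index per byte of the message parameter. A standalone re-run of that loop with a made-up mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tmp = (1u << 4) | (1u << 9) | (1u << 21);	/* illustrative pins */
	uint32_t mask = 0;
	int i, j;

	for (i = 0, j = 0; i < 32; i++) {
		if (tmp & 1) {
			mask |= (uint32_t)i << (8 * j);	/* pin index into byte j */
			if (++j >= 3)
				break;
		}
		tmp >>= 1;
	}
	printf("LedConfig parameter = 0x%08x\n", mask);	/* 0x00150904 */
	return 0;
}
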
| 1919 | |
| 1920 | static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) | 
| 1921 | { | 
| 1922 | int result; | 
| 1923 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 1924 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 1925 | struct phm_ppt_v1_information *table_info = | 
| 1926 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 1927 | struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table); | 
| 1928 | uint8_t i; | 
| 1929 | struct pp_atomctrl_gpio_pin_assignment gpio_pin; | 
| 1930 | |
| 1931 | fiji_initialize_power_tune_defaults(hwmgr); | 
| 1932 | |
| 1933 | if (SMU7_VOLTAGE_CONTROL_NONE0x0 != data->voltage_control) | 
| 1934 | fiji_populate_smc_voltage_tables(hwmgr, table); | 
| 1935 | |
| 1936 | table->SystemFlags = 0; | 
| 1937 | |
| 1938 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 1939 | PHM_PlatformCaps_AutomaticDCTransition)) | 
| 1940 | table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC0x01; | 
| 1941 | |
| 1942 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 1943 | PHM_PlatformCaps_StepVddc)) | 
| 1944 | table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC0x02; | 
| 1945 | |
| 1946 | if (data->is_memory_gddr5) | 
| 1947 | table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR50x04; | 
| 1948 | |
| 1949 | if (data->ulv_supported && table_info->us_ulv_voltage_offset) { | 
| 1950 | result = fiji_populate_ulv_state(hwmgr, table); | 
| 1951 | 		PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ULV state!" ); return result; } } while (0)  | 
| 1952 | 				"Failed to initialize ULV state!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ULV state!" ); return result; } } while (0);  | 
| 1953 | 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020015c,0x40035))  | 
| 1954 | 				ixCG_ULV_PARAMETER, 0x40035)(((struct cgs_device *)hwmgr->device)->ops->write_ind_register (hwmgr->device,CGS_IND_REG__SMC,0xc020015c,0x40035));  | 
| 1955 | } | 
| 1956 | |
| 1957 | result = fiji_populate_smc_link_level(hwmgr, table); | 
| 1958 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Link Level!" ); return result; } } while (0)  | 
| 1959 | 			"Failed to initialize Link Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Link Level!" ); return result; } } while (0);  | 
| 1960 | |
| 1961 | result = fiji_populate_all_graphic_levels(hwmgr); | 
| 1962 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Graphics Level!" ); return result; } } while (0)  | 
| 1963 | 			"Failed to initialize Graphics Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Graphics Level!" ); return result; } } while (0);  | 
| 1964 | |
| 1965 | result = fiji_populate_all_memory_levels(hwmgr); | 
| 1966 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Memory Level!" ); return result; } } while (0)  | 
| 1967 | 			"Failed to initialize Memory Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Memory Level!" ); return result; } } while (0);  | 
| 1968 | |
| 1969 | result = fiji_populate_smc_acpi_level(hwmgr, table); | 
| 1970 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACPI Level!" ); return result; } } while (0)  | 
| 1971 | 			"Failed to initialize ACPI Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACPI Level!" ); return result; } } while (0);  | 
| 1972 | |
| 1973 | result = fiji_populate_smc_vce_level(hwmgr, table); | 
| 1974 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize VCE Level!" ); return result; } } while (0)  | 
| 1975 | 			"Failed to initialize VCE Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize VCE Level!" ); return result; } } while (0);  | 
| 1976 | |
| 1977 | result = fiji_populate_smc_acp_level(hwmgr, table); | 
| 1978 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACP Level!" ); return result; } } while (0)  | 
| 1979 | 			"Failed to initialize ACP Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize ACP Level!" ); return result; } } while (0);  | 
| 1980 | |
| 1981 | /* Since only the initial state is completely set up at this point | 
| 1982 | 	 * (the other states are just copies of the boot state), we only | 
| 1983 | * need to populate the ARB settings for the initial state. | 
| 1984 | */ | 
| 1985 | result = fiji_program_memory_timing_parameters(hwmgr); | 
| 1986 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to Write ARB settings for the initial state." ); return result; } } while (0)  | 
| 1987 | 			"Failed to Write ARB settings for the initial state.", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to Write ARB settings for the initial state." ); return result; } } while (0);  | 
| 1988 | |
| 1989 | result = fiji_populate_smc_uvd_level(hwmgr, table); | 
| 1990 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize UVD Level!" ); return result; } } while (0)  | 
| 1991 | 			"Failed to initialize UVD Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize UVD Level!" ); return result; } } while (0);  | 
| 1992 | |
| 1993 | result = fiji_populate_smc_boot_level(hwmgr, table); | 
| 1994 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Boot Level!" ); return result; } } while (0)  | 
| 1995 | 			"Failed to initialize Boot Level!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Boot Level!" ); return result; } } while (0);  | 
| 1996 | |
| 1997 | result = fiji_populate_smc_initailial_state(hwmgr); | 
| 1998 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Boot State!" ); return result; } } while (0)  | 
| 1999 | 			"Failed to initialize Boot State!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to initialize Boot State!" ); return result; } } while (0);  | 
| 2000 | |
| 2001 | result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr); | 
| 2002 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate BAPM Parameters!" ); return result; } } while (0)  | 
| 2003 | 			"Failed to populate BAPM Parameters!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate BAPM Parameters!" ); return result; } } while (0);  | 
| 2004 | |
| 2005 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 2006 | PHM_PlatformCaps_ClockStretcher)) { | 
| 2007 | result = fiji_populate_clock_stretcher_data_table(hwmgr); | 
| 2008 | 		PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate Clock Stretcher Data Table!" ); return result; } } while (0)  | 
| 2009 | 				"Failed to populate Clock Stretcher Data Table!",do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate Clock Stretcher Data Table!" ); return result; } } while (0)  | 
| 2010 | 				return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate Clock Stretcher Data Table!" ); return result; } } while (0);  | 
| 2011 | } | 
| 2012 | |
| 2013 | table->GraphicsVoltageChangeEnable = 1; | 
| 2014 | table->GraphicsThermThrottleEnable = 1; | 
| 2015 | table->GraphicsInterval = 1; | 
| 2016 | table->VoltageInterval = 1; | 
| 2017 | table->ThermalInterval = 1; | 
| 2018 | table->TemperatureLimitHigh = | 
| 2019 | table_info->cac_dtp_table->usTargetOperatingTemp * | 
| 2020 | SMU7_Q88_FORMAT_CONVERSION_UNIT256; | 
| 2021 | table->TemperatureLimitLow = | 
| 2022 | (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * | 
| 2023 | SMU7_Q88_FORMAT_CONVERSION_UNIT256; | 
| 2024 | table->MemoryVoltageChangeEnable = 1; | 
| 2025 | table->MemoryInterval = 1; | 
| 2026 | table->VoltageResponseTime = 0; | 
| 2027 | table->PhaseResponseTime = 0; | 
| 2028 | table->MemoryThermThrottleEnable = 1; | 
| 2029 | table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ | 
| 2030 | table->PCIeGenInterval = 1; | 
| 2031 | table->VRConfig = 0; | 
| 2032 | |
| 2033 | result = fiji_populate_vr_config(hwmgr, table); | 
| 2034 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate VRConfig setting!" ); return result; } } while (0)  | 
| 2035 | 			"Failed to populate VRConfig setting!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to populate VRConfig setting!" ); return result; } } while (0);  | 
| 2036 | data->vr_config = table->VRConfig; | 
| 2037 | table->ThermGpio = 17; | 
| 2038 | table->SclkStepSize = 0x4000; | 
| 2039 | |
| 2040 | if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID61, &gpio_pin)) { | 
| 2041 | table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; | 
| 2042 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 
| 2043 | PHM_PlatformCaps_RegulatorHot); | 
| 2044 | } else { | 
| 2045 | table->VRHotGpio = SMU7_UNUSED_GPIO_PIN0x7F; | 
| 2046 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | 
| 2047 | PHM_PlatformCaps_RegulatorHot); | 
| 2048 | } | 
| 2049 | |
| 2050 | if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID60, | 
| 2051 | &gpio_pin)) { | 
| 2052 | table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; | 
| 2053 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 
| 2054 | PHM_PlatformCaps_AutomaticDCTransition); | 
| 2055 | } else { | 
| 2056 | table->AcDcGpio = SMU7_UNUSED_GPIO_PIN0x7F; | 
| 2057 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | 
| 2058 | PHM_PlatformCaps_AutomaticDCTransition); | 
| 2059 | } | 
| 2060 | |
| 2061 | /* Thermal Output GPIO */ | 
| 2062 | if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID65, | 
| 2063 | &gpio_pin)) { | 
| 2064 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 
| 2065 | PHM_PlatformCaps_ThermalOutGPIO); | 
| 2066 | |
| 2067 | table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; | 
| 2068 | |
| 2069 | 		/* For polarity, read GPIOPAD_A with the assigned GPIO pin; | 
| 2070 | 		 * since the VBIOS will program this register to set the 'inactive state', | 
| 2071 | 		 * the driver can then determine the 'active state' from it and | 
| 2072 | 		 * program the SMU with the correct polarity. | 
| 2073 | */ | 
| 2074 | 		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)(((struct cgs_device *)hwmgr->device)->ops->read_register (hwmgr->device,0x183)) &  | 
| 2075 | (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0; | 
| 2076 | table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY0x1; | 
| 2077 | |
| 2078 | /* if required, combine VRHot/PCC with thermal out GPIO */ | 
| 2079 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 2080 | PHM_PlatformCaps_RegulatorHot) && | 
| 2081 | phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 2082 | PHM_PlatformCaps_CombinePCCWithThermalSignal)) | 
| 2083 | table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT0x2; | 
| 2084 | } else { | 
| 2085 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | 
| 2086 | PHM_PlatformCaps_ThermalOutGPIO); | 
| 2087 | table->ThermOutGpio = 17; | 
| 2088 | table->ThermOutPolarity = 1; | 
| 2089 | table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE0x0; | 
| 2090 | } | 
| 2091 | |
| 2092 | for (i = 0; i < SMU73_MAX_ENTRIES_SMIO32; i++) | 
| 2093 | 		table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i])(__uint32_t)(__builtin_constant_p(table->Smio[i]) ? (__uint32_t )(((__uint32_t)(table->Smio[i]) & 0xff) << 24 | ( (__uint32_t)(table->Smio[i]) & 0xff00) << 8 | (( __uint32_t)(table->Smio[i]) & 0xff0000) >> 8 | ( (__uint32_t)(table->Smio[i]) & 0xff000000) >> 24 ) : __swap32md(table->Smio[i]));  | 
| 2094 | |
| 2095 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags)((table->SystemFlags) = (__uint32_t)(__builtin_constant_p( table->SystemFlags) ? (__uint32_t)(((__uint32_t)(table-> SystemFlags) & 0xff) << 24 | ((__uint32_t)(table-> SystemFlags) & 0xff00) << 8 | ((__uint32_t)(table-> SystemFlags) & 0xff0000) >> 8 | ((__uint32_t)(table ->SystemFlags) & 0xff000000) >> 24) : __swap32md (table->SystemFlags)));  | 
| 2096 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig)((table->VRConfig) = (__uint32_t)(__builtin_constant_p(table ->VRConfig) ? (__uint32_t)(((__uint32_t)(table->VRConfig ) & 0xff) << 24 | ((__uint32_t)(table->VRConfig) & 0xff00) << 8 | ((__uint32_t)(table->VRConfig) & 0xff0000) >> 8 | ((__uint32_t)(table->VRConfig ) & 0xff000000) >> 24) : __swap32md(table->VRConfig )));  | 
| 2097 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1)((table->SmioMask1) = (__uint32_t)(__builtin_constant_p(table ->SmioMask1) ? (__uint32_t)(((__uint32_t)(table->SmioMask1 ) & 0xff) << 24 | ((__uint32_t)(table->SmioMask1 ) & 0xff00) << 8 | ((__uint32_t)(table->SmioMask1 ) & 0xff0000) >> 8 | ((__uint32_t)(table->SmioMask1 ) & 0xff000000) >> 24) : __swap32md(table->SmioMask1 )));  | 
| 2098 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2)((table->SmioMask2) = (__uint32_t)(__builtin_constant_p(table ->SmioMask2) ? (__uint32_t)(((__uint32_t)(table->SmioMask2 ) & 0xff) << 24 | ((__uint32_t)(table->SmioMask2 ) & 0xff00) << 8 | ((__uint32_t)(table->SmioMask2 ) & 0xff0000) >> 8 | ((__uint32_t)(table->SmioMask2 ) & 0xff000000) >> 24) : __swap32md(table->SmioMask2 )));  | 
| 2099 | 	CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize)((table->SclkStepSize) = (__uint32_t)(__builtin_constant_p (table->SclkStepSize) ? (__uint32_t)(((__uint32_t)(table-> SclkStepSize) & 0xff) << 24 | ((__uint32_t)(table-> SclkStepSize) & 0xff00) << 8 | ((__uint32_t)(table-> SclkStepSize) & 0xff0000) >> 8 | ((__uint32_t)(table ->SclkStepSize) & 0xff000000) >> 24) : __swap32md (table->SclkStepSize)));  | 
| 2100 | 	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh)((table->TemperatureLimitHigh) = (__uint16_t)(__builtin_constant_p (table->TemperatureLimitHigh) ? (__uint16_t)(((__uint16_t) (table->TemperatureLimitHigh) & 0xffU) << 8 | (( __uint16_t)(table->TemperatureLimitHigh) & 0xff00U) >> 8) : __swap16md(table->TemperatureLimitHigh)));  | 
| 2101 | 	CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow)((table->TemperatureLimitLow) = (__uint16_t)(__builtin_constant_p (table->TemperatureLimitLow) ? (__uint16_t)(((__uint16_t)( table->TemperatureLimitLow) & 0xffU) << 8 | ((__uint16_t )(table->TemperatureLimitLow) & 0xff00U) >> 8) : __swap16md(table->TemperatureLimitLow)));  | 
| 2102 | 	CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime)((table->VoltageResponseTime) = (__uint16_t)(__builtin_constant_p (table->VoltageResponseTime) ? (__uint16_t)(((__uint16_t)( table->VoltageResponseTime) & 0xffU) << 8 | ((__uint16_t )(table->VoltageResponseTime) & 0xff00U) >> 8) : __swap16md(table->VoltageResponseTime)));  | 
| 2103 | 	CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime)((table->PhaseResponseTime) = (__uint16_t)(__builtin_constant_p (table->PhaseResponseTime) ? (__uint16_t)(((__uint16_t)(table ->PhaseResponseTime) & 0xffU) << 8 | ((__uint16_t )(table->PhaseResponseTime) & 0xff00U) >> 8) : __swap16md (table->PhaseResponseTime)));  | 
| 2104 | |
| 2105 | /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ | 
| 2106 | result = smu7_copy_bytes_to_smc(hwmgr, | 
| 2107 | smu_data->smu7_data.dpm_table_start + | 
| 2108 | offsetof(SMU73_Discrete_DpmTable, SystemFlags)__builtin_offsetof(SMU73_Discrete_DpmTable, SystemFlags), | 
| 2109 | (uint8_t *)&(table->SystemFlags), | 
| 2110 | sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController), | 
| 2111 | SMC_RAM_END0x40000); | 
| 2112 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload dpm data to SMC memory!" ); return result; } } while (0)  | 
| 2113 | 			"Failed to upload dpm data to SMC memory!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload dpm data to SMC memory!" ); return result; } } while (0);  | 
| 2114 | |
| 2115 | result = fiji_init_arb_table_index(hwmgr); | 
| 2116 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload arb data to SMC memory!" ); return result; } } while (0)  | 
| 2117 | 			"Failed to upload arb data to SMC memory!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to upload arb data to SMC memory!" ); return result; } } while (0);  | 
| 2118 | |
| 2119 | result = fiji_populate_pm_fuses(hwmgr); | 
| 2120 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to  populate PM fuses to SMC memory!" ); return result; } } while (0)  | 
| 2121 | 			"Failed to  populate PM fuses to SMC memory!", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to  populate PM fuses to SMC memory!" ); return result; } } while (0);  | 
| 2122 | |
| 2123 | result = fiji_setup_dpm_led_config(hwmgr); | 
| 2124 | 	PP_ASSERT_WITH_CODE(0 == result,do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to setup dpm led config" ); return result; } } while (0)  | 
| 2125 | 			    "Failed to setup dpm led config", return result)do { if (!(0 == result)) { printk("\0014" "amdgpu: " "%s\n", "Failed to setup dpm led config" ); return result; } } while (0);  | 
| 2126 | |
| 2127 | return 0; | 
| 2128 | } | 
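
Within fiji_init_smc_table() the thermal limits are converted to the SMU's Q8.8 fixed-point format by multiplying by SMU7_Q88_FORMAT_CONVERSION_UNIT (256). A short worked example of that encoding, with an illustrative target temperature:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t target_temp_c = 90;	/* illustrative usTargetOperatingTemp */

	/* Q8.8: whole degrees in the high byte, fraction in the low byte. */
	uint16_t limit_high = target_temp_c * 256;		/* 0x5a00 */
	uint16_t limit_low  = (target_temp_c - 1) * 256;	/* 0x5900 */

	printf("high=0x%04x low=0x%04x\n", limit_high, limit_low);
	return 0;
}
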
| 2129 | |
| 2130 | static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) | 
| 2131 | { | 
| 2132 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 2133 | |
| 2134 | SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE0 }; | 
| 2135 | uint32_t duty100; | 
| 2136 | uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; | 
| 2137 | uint16_t fdo_min, slope1, slope2; | 
| 2138 | uint32_t reference_clock; | 
| 2139 | int res; | 
| 2140 | uint64_t tmp64; | 
| 2141 | |
| 2142 | if (hwmgr->thermal_controller.fanInfo.bNoFan) { | 
| 2143 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | 
| 2144 | PHM_PlatformCaps_MicrocodeFanControl); | 
| 2145 | return 0; | 
| 2146 | } | 
| 2147 | |
| 2148 | if (smu_data->smu7_data.fan_table_start == 0) { | 
| 2149 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | 
| 2150 | PHM_PlatformCaps_MicrocodeFanControl); | 
| 2151 | return 0; | 
| 2152 | } | 
| 2153 | |
| 2154 | 	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | 
| 2155 | 			CG_FDO_CTRL1, FMAX_DUTY100); | 
| 2156 | |
| 2157 | if (duty100 == 0) { | 
| 2158 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | 
| 2159 | PHM_PlatformCaps_MicrocodeFanControl); | 
| 2160 | return 0; | 
| 2161 | } | 
| 2162 | |
| 2163 | tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. | 
| 2164 | usPWMMin * duty100; | 
| 2165 | 	do_div(tmp64, 10000); | 
| 2166 | fdo_min = (uint16_t)tmp64; | 
| 2167 | |
| 2168 | t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - | 
| 2169 | hwmgr->thermal_controller.advanceFanControlParameters.usTMin; | 
| 2170 | t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - | 
| 2171 | hwmgr->thermal_controller.advanceFanControlParameters.usTMed; | 
| 2172 | |
| 2173 | pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - | 
| 2174 | hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; | 
| 2175 | pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - | 
| 2176 | hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; | 
| 2177 | |
| 2178 | slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); | 
| 2179 | slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); | 
| 2180 | |
| 2181 | 	fan_table.TempMin = cpu_to_be16((50 + hwmgr-> | 
| 2182 | 			thermal_controller.advanceFanControlParameters.usTMin) / 100); | 
| 2183 | 	fan_table.TempMed = cpu_to_be16((50 + hwmgr-> | 
| 2184 | 			thermal_controller.advanceFanControlParameters.usTMed) / 100); | 
| 2185 | 	fan_table.TempMax = cpu_to_be16((50 + hwmgr-> | 
| 2186 | 			thermal_controller.advanceFanControlParameters.usTMax) / 100); | 
| 2187 | |
| 2188 | 	fan_table.Slope1 = cpu_to_be16(slope1); | 
| 2189 | 	fan_table.Slope2 = cpu_to_be16(slope2); | 
| 2190 | |
| 2191 | 	fan_table.FdoMin = cpu_to_be16(fdo_min); | 
| 2192 | |
| 2193 | 	fan_table.HystDown = cpu_to_be16(hwmgr-> | 
| 2194 | 			thermal_controller.advanceFanControlParameters.ucTHyst); | 
| 2195 | |
| 2196 | 	fan_table.HystUp = cpu_to_be16(1); | 
| 2197 | |
| 2198 | 	fan_table.HystSlope = cpu_to_be16(1); | 
| 2199 | |
| 2200 | 	fan_table.TempRespLim = cpu_to_be16(5); | 
| 2201 | |
| 2202 | 	reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); | 
| 2203 | |
| 2204 | 	fan_table.RefreshPeriod = cpu_to_be32((hwmgr-> | 
| 2205 | 			thermal_controller.advanceFanControlParameters.ulCycleDelay * | 
| 2206 | 			reference_clock) / 1600); | 
| 2207 | |
| 2208 | 	fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); | 
| 2209 | |
| 2210 | 	fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD( | 
| 2211 | 			hwmgr->device, CGS_IND_REG__SMC, | 
| 2212 | 			CG_MULT_THERMAL_CTRL, TEMP_SEL); | 
| 2213 | |
| 2214 | res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, | 
| 2215 | (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), | 
| 2216 | 			SMC_RAM_END); | 
| 2217 | |
| 2218 | if (!res && hwmgr->thermal_controller. | 
| 2219 | advanceFanControlParameters.ucMinimumPWMLimit) | 
| 2220 | res = smum_send_msg_to_smc_with_parameter(hwmgr, | 
| 2221 | 				PPSMC_MSG_SetFanMinPwm, | 
| 2222 | 				hwmgr->thermal_controller. | 
| 2223 | 				advanceFanControlParameters.ucMinimumPWMLimit, | 
| 2224 | 				NULL); | 
| 2225 | |
| 2226 | if (!res && hwmgr->thermal_controller. | 
| 2227 | advanceFanControlParameters.ulMinFanSCLKAcousticLimit) | 
| 2228 | res = smum_send_msg_to_smc_with_parameter(hwmgr, | 
| 2229 | 				PPSMC_MSG_SetFanSclkTarget, | 
| 2230 | 				hwmgr->thermal_controller. | 
| 2231 | 				advanceFanControlParameters.ulMinFanSCLKAcousticLimit, | 
| 2232 | 				NULL); | 
| 2233 | |
| 2234 | if (res) | 
| 2235 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | 
| 2236 | PHM_PlatformCaps_MicrocodeFanControl); | 
| 2237 | |
| 2238 | return 0; | 
| 2239 | } | 
| 2240 | |
| 2241 | |
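|  | /* Ask the SMC to enable AVFS when the hwmgr reports AVFS support; the | 
|  |  * message result is not checked here. */ | 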
| 2242 | static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) | 
| 2243 | { | 
| 2244 | if (!hwmgr->avfs_supported) | 
| 2245 | return 0; | 
| 2246 | |
| 2247 | 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL); | 
| 2248 | |
| 2249 | return 0; | 
| 2250 | } | 
| 2251 | |
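|  | /* Reprogram memory timing parameters only when an overdrive SCLK or MCLK | 
|  |  * update is pending in need_update_smu7_dpm_table. */ | 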
| 2252 | static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) | 
| 2253 | { | 
| 2254 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 2255 | |
| 2256 | if (data->need_update_smu7_dpm_table & | 
| 2257 | 			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) | 
| 2258 | return fiji_program_memory_timing_parameters(hwmgr); | 
| 2259 | |
| 2260 | return 0; | 
| 2261 | } | 
| 2262 | |
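|  | /* Write the low-SCLK interrupt threshold into the SMC DPM table (when the | 
|  |  * SclkThrottleLowNotification cap is enabled) and then refresh the memory | 
|  |  * timing parameters. */ | 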
| 2263 | static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) | 
| 2264 | { | 
| 2265 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 2266 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 2267 | |
| 2268 | int result = 0; | 
| 2269 | uint32_t low_sclk_interrupt_threshold = 0; | 
| 2270 | |
| 2271 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 2272 | PHM_PlatformCaps_SclkThrottleLowNotification) | 
| 2273 | && (data->low_sclk_interrupt_threshold != 0)) { | 
| 2274 | low_sclk_interrupt_threshold = | 
| 2275 | data->low_sclk_interrupt_threshold; | 
| 2276 | |
| 2277 | 		CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); | 
| 2278 | |
| 2279 | result = smu7_copy_bytes_to_smc( | 
| 2280 | hwmgr, | 
| 2281 | smu_data->smu7_data.dpm_table_start + | 
| 2282 | 				offsetof(SMU73_Discrete_DpmTable, | 
| 2283 | 					LowSclkInterruptThreshold), | 
| 2284 | (uint8_t *)&low_sclk_interrupt_threshold, | 
| 2285 | sizeof(uint32_t), | 
| 2286 | 				SMC_RAM_END); | 
| 2287 | } | 
| 2288 | result = fiji_program_mem_timing_parameters(hwmgr); | 
| 2289 | 	PP_ASSERT_WITH_CODE((result == 0), | 
| 2290 | 			"Failed to program memory timing parameters!", | 
| 2291 | 			); | 
| 2292 | return result; | 
| 2293 | } | 
| 2294 | |
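|  | /* Translate generic SMU_SoftRegisters / SMU_Discrete_DpmTable member ids | 
|  |  * into byte offsets within the SMU73 firmware structures. */ | 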
| 2295 | static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) | 
| 2296 | { | 
| 2297 | switch (type) { | 
| 2298 | case SMU_SoftRegisters: | 
| 2299 | switch (member) { | 
| 2300 | case HandshakeDisables: | 
| 2301 | 			return offsetof(SMU73_SoftRegisters, HandshakeDisables); | 
| 2302 | 		case VoltageChangeTimeout: | 
| 2303 | 			return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout); | 
| 2304 | 		case AverageGraphicsActivity: | 
| 2305 | 			return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity); | 
| 2306 | 		case AverageMemoryActivity: | 
| 2307 | 			return offsetof(SMU73_SoftRegisters, AverageMemoryActivity); | 
| 2308 | 		case PreVBlankGap: | 
| 2309 | 			return offsetof(SMU73_SoftRegisters, PreVBlankGap); | 
| 2310 | 		case VBlankTimeout: | 
| 2311 | 			return offsetof(SMU73_SoftRegisters, VBlankTimeout); | 
| 2312 | 		case UcodeLoadStatus: | 
| 2313 | 			return offsetof(SMU73_SoftRegisters, UcodeLoadStatus); | 
| 2314 | 		case DRAM_LOG_ADDR_H: | 
| 2315 | 			return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_H); | 
| 2316 | 		case DRAM_LOG_ADDR_L: | 
| 2317 | 			return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_L); | 
| 2318 | 		case DRAM_LOG_PHY_ADDR_H: | 
| 2319 | 			return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_H); | 
| 2320 | 		case DRAM_LOG_PHY_ADDR_L: | 
| 2321 | 			return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_L); | 
| 2322 | 		case DRAM_LOG_BUFF_SIZE: | 
| 2323 | 			return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE); | 
| 2324 | } | 
| 2325 | break; | 
| 2326 | case SMU_Discrete_DpmTable: | 
| 2327 | switch (member) { | 
| 2328 | case UvdBootLevel: | 
| 2329 | 			return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel); | 
| 2330 | 		case VceBootLevel: | 
| 2331 | 			return offsetof(SMU73_Discrete_DpmTable, VceBootLevel); | 
| 2332 | 		case LowSclkInterruptThreshold: | 
| 2333 | 			return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); | 
| 2334 | } | 
| 2335 | break; | 
| 2336 | } | 
| 2337 | 	pr_warn("can't get the offset of type %x member %x\n", type, member); | 
| 2338 | return 0; | 
| 2339 | } | 
| 2340 | |
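|  | /* Map generic SMU limit identifiers onto the SMU73-specific maximum level | 
|  |  * counts and SMIO table size. */ | 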
| 2341 | static uint32_t fiji_get_mac_definition(uint32_t value) | 
| 2342 | { | 
| 2343 | switch (value) { | 
| 2344 | case SMU_MAX_LEVELS_GRAPHICS: | 
| 2345 | 		return SMU73_MAX_LEVELS_GRAPHICS; | 
| 2346 | 	case SMU_MAX_LEVELS_MEMORY: | 
| 2347 | 		return SMU73_MAX_LEVELS_MEMORY; | 
| 2348 | 	case SMU_MAX_LEVELS_LINK: | 
| 2349 | 		return SMU73_MAX_LEVELS_LINK; | 
| 2350 | 	case SMU_MAX_ENTRIES_SMIO: | 
| 2351 | 		return SMU73_MAX_ENTRIES_SMIO; | 
| 2352 | 	case SMU_MAX_LEVELS_VDDC: | 
| 2353 | 		return SMU73_MAX_LEVELS_VDDC; | 
| 2354 | 	case SMU_MAX_LEVELS_VDDGFX: | 
| 2355 | 		return SMU73_MAX_LEVELS_VDDGFX; | 
| 2356 | 	case SMU_MAX_LEVELS_VDDCI: | 
| 2357 | 		return SMU73_MAX_LEVELS_VDDCI; | 
| 2358 | 	case SMU_MAX_LEVELS_MVDD: | 
| 2359 | 		return SMU73_MAX_LEVELS_MVDD; | 
| 2360 | } | 
| 2361 | |
| 2362 | 	pr_warn("can't get the mac of %x\n", value); | 
| 2363 | return 0; | 
| 2364 | } | 
| 2365 | |
| 2366 | |
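|  | /* Pick the UVD boot level (highest multimedia dependency entry), patch it | 
|  |  * into the DPM table in SMC RAM, and send the UVD DPM enabled mask when | 
|  |  * UVD DPM is disabled or a stable p-state is requested. */ | 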
| 2367 | static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) | 
| 2368 | { | 
| 2369 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 2370 | uint32_t mm_boot_level_offset, mm_boot_level_value; | 
| 2371 | struct phm_ppt_v1_information *table_info = | 
| 2372 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 2373 | |
| 2374 | smu_data->smc_state_table.UvdBootLevel = 0; | 
| 2375 | if (table_info->mm_dep_table->count > 0) | 
| 2376 | smu_data->smc_state_table.UvdBootLevel = | 
| 2377 | (uint8_t) (table_info->mm_dep_table->count - 1); | 
| 2378 | 	mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, | 
| 2379 | 			UvdBootLevel); | 
| 2380 | mm_boot_level_offset /= 4; | 
| 2381 | mm_boot_level_offset *= 4; | 
| 2382 | 	mm_boot_level_value = cgs_read_ind_register(hwmgr->device, | 
| 2383 | 			CGS_IND_REG__SMC, mm_boot_level_offset); | 
| 2384 | 	mm_boot_level_value &= 0x00FFFFFF; | 
| 2385 | 	mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24; | 
| 2386 | 	cgs_write_ind_register(hwmgr->device, | 
| 2387 | 			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); | 
| 2388 | |
| 2389 | if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 2390 | PHM_PlatformCaps_UVDDPM) || | 
| 2391 | phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 2392 | PHM_PlatformCaps_StablePState)) | 
| 2393 | smum_send_msg_to_smc_with_parameter(hwmgr, | 
| 2394 | 				PPSMC_MSG_UVDDPM_SetEnabledMask, | 
| 2395 | 				(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel), | 
| 2396 | 				NULL); | 
| 2397 | return 0; | 
| 2398 | } | 
| 2399 | |
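|  | /* Same pattern for VCE: choose the boot level, patch VceBootLevel in SMC | 
|  |  * RAM, and send the VCE DPM enabled mask when a stable p-state is set. */ | 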
| 2400 | static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) | 
| 2401 | { | 
| 2402 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 2403 | uint32_t mm_boot_level_offset, mm_boot_level_value; | 
| 2404 | struct phm_ppt_v1_information *table_info = | 
| 2405 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 
| 2406 | |
| 2407 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 
| 2408 | PHM_PlatformCaps_StablePState)) | 
| 2409 | smu_data->smc_state_table.VceBootLevel = | 
| 2410 | (uint8_t) (table_info->mm_dep_table->count - 1); | 
| 2411 | else | 
| 2412 | smu_data->smc_state_table.VceBootLevel = 0; | 
| 2413 | |
| 2414 | mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + | 
| 2415 | 			offsetof(SMU73_Discrete_DpmTable, VceBootLevel); | 
| 2416 | mm_boot_level_offset /= 4; | 
| 2417 | mm_boot_level_offset *= 4; | 
| 2418 | 	mm_boot_level_value = cgs_read_ind_register(hwmgr->device, | 
| 2419 | 			CGS_IND_REG__SMC, mm_boot_level_offset); | 
| 2420 | 	mm_boot_level_value &= 0xFF00FFFF; | 
| 2421 | 	mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16; | 
| 2422 | 	cgs_write_ind_register(hwmgr->device, | 
| 2423 | 			CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); | 
| 2424 | |
| 2425 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) | 
| 2426 | smum_send_msg_to_smc_with_parameter(hwmgr, | 
| 2427 | 				PPSMC_MSG_VCEDPM_SetEnabledMask, | 
| 2428 | 				(uint32_t)1 << smu_data->smc_state_table.VceBootLevel, | 
| 2429 | 				NULL); | 
| 2430 | return 0; | 
| 2431 | } | 
| 2432 | |
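|  | /* Dispatch SMC table updates by type; only the UVD and VCE tables are | 
|  |  * handled for Fiji. */ | 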
| 2433 | static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) | 
| 2434 | { | 
| 2435 | switch (type) { | 
| 2436 | case SMU_UVD_TABLE: | 
| 2437 | fiji_update_uvd_smc_table(hwmgr); | 
| 2438 | break; | 
| 2439 | case SMU_VCE_TABLE: | 
| 2440 | fiji_update_vce_smc_table(hwmgr); | 
| 2441 | break; | 
| 2442 | default: | 
| 2443 | break; | 
| 2444 | } | 
| 2445 | return 0; | 
| 2446 | } | 
| 2447 | |
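|  | /* Read the SMU73 firmware header out of SMC SRAM and cache the offsets of | 
|  |  * the DPM table, soft registers, MC register table, fan table and arb | 
|  |  * table, plus the SMC firmware version. */ | 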
| 2448 | static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) | 
| 2449 | { | 
| 2450 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 2451 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); | 
| 2452 | uint32_t tmp; | 
| 2453 | int result; | 
| 2454 | 	bool error = false; | 
| 2455 | |
| 2456 | result = smu7_read_smc_sram_dword(hwmgr, | 
| 2457 | 			SMU7_FIRMWARE_HEADER_LOCATION + | 
| 2458 | 			offsetof(SMU73_Firmware_Header, DpmTable), | 
| 2459 | 			&tmp, SMC_RAM_END); | 
| 2460 | |
| 2461 | if (0 == result) | 
| 2462 | smu_data->smu7_data.dpm_table_start = tmp; | 
| 2463 | |
| 2464 | error |= (0 != result); | 
| 2465 | |
| 2466 | result = smu7_read_smc_sram_dword(hwmgr, | 
| 2467 | 			SMU7_FIRMWARE_HEADER_LOCATION + | 
| 2468 | 			offsetof(SMU73_Firmware_Header, SoftRegisters), | 
| 2469 | 			&tmp, SMC_RAM_END); | 
| 2470 | |
| 2471 | if (!result) { | 
| 2472 | data->soft_regs_start = tmp; | 
| 2473 | smu_data->smu7_data.soft_regs_start = tmp; | 
| 2474 | } | 
| 2475 | |
| 2476 | error |= (0 != result); | 
| 2477 | |
| 2478 | result = smu7_read_smc_sram_dword(hwmgr, | 
| 2479 | 			SMU7_FIRMWARE_HEADER_LOCATION + | 
| 2480 | 			offsetof(SMU73_Firmware_Header, mcRegisterTable), | 
| 2481 | 			&tmp, SMC_RAM_END); | 
| 2482 | |
| 2483 | if (!result) | 
| 2484 | smu_data->smu7_data.mc_reg_table_start = tmp; | 
| 2485 | |
| 2486 | result = smu7_read_smc_sram_dword(hwmgr, | 
| 2487 | 			SMU7_FIRMWARE_HEADER_LOCATION + | 
| 2488 | 			offsetof(SMU73_Firmware_Header, FanTable), | 
| 2489 | 			&tmp, SMC_RAM_END); | 
| 2490 | |
| 2491 | if (!result) | 
| 2492 | smu_data->smu7_data.fan_table_start = tmp; | 
| 2493 | |
| 2494 | error |= (0 != result); | 
| 2495 | |
| 2496 | result = smu7_read_smc_sram_dword(hwmgr, | 
| 2497 | 			SMU7_FIRMWARE_HEADER_LOCATION + | 
| 2498 | 			offsetof(SMU73_Firmware_Header, mcArbDramTimingTable), | 
| 2499 | 			&tmp, SMC_RAM_END); | 
| 2500 | |
| 2501 | if (!result) | 
| 2502 | smu_data->smu7_data.arb_table_start = tmp; | 
| 2503 | |
| 2504 | error |= (0 != result); | 
| 2505 | |
| 2506 | result = smu7_read_smc_sram_dword(hwmgr, | 
| 2507 | 			SMU7_FIRMWARE_HEADER_LOCATION + | 
| 2508 | 			offsetof(SMU73_Firmware_Header, Version), | 
| 2509 | 			&tmp, SMC_RAM_END); | 
| 2510 | |
| 2511 | if (!result) | 
| 2512 | hwmgr->microcode_version_info.SMC = tmp; | 
| 2513 | |
| 2514 | error |= (0 != result); | 
| 2515 | |
| 2516 | return error ? -1 : 0; | 
| 2517 | } | 
| 2518 | |
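|  | /* Mirror the active MC SEQ timing registers into their LP shadow copies, | 
|  |  * since the VBIOS no longer programs them. */ | 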
| 2519 | static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) | 
| 2520 | { | 
| 2521 | |
| 2522 | /* Program additional LP registers | 
| 2523 | * that are no longer programmed by VBIOS | 
| 2524 | */ | 
| 2525 | 	cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, | 
| 2526 | 			cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); | 
| 2527 | 	cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, | 
| 2528 | 			cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); | 
| 2529 | 	cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, | 
| 2530 | 			cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); | 
| 2531 | 	cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, | 
| 2532 | 			cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); | 
| 2533 | 	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, | 
| 2534 | 			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); | 
| 2535 | 	cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, | 
| 2536 | 			cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); | 
| 2537 | 	cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, | 
| 2538 | 			cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); | 
| 2539 | |
| 2540 | return 0; | 
| 2541 | } | 
| 2542 | |
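|  | /* DPM is considered running when the SMC FEATURE_STATUS register reports | 
|  |  * the voltage controller as on. */ | 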
| 2543 | static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr) | 
| 2544 | { | 
| 2545 | 	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, | 
| 2546 | 			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) | 
| 2547 | 			? true : false; | 
| 2548 | } | 
| 2549 | |
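|  | /* Apply a profile_mode_setting to the cached graphics and memory DPM | 
|  |  * levels: activity targets and up/down hysteresis are rewritten in SMC RAM | 
|  |  * with the corresponding DPM level frozen around the update. */ | 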
| 2550 | static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr, | 
| 2551 | void *profile_setting) | 
| 2552 | { | 
| 2553 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 
| 2554 | struct fiji_smumgr *smu_data = (struct fiji_smumgr *) | 
| 2555 | (hwmgr->smu_backend); | 
| 2556 | struct profile_mode_setting *setting; | 
| 2557 | struct SMU73_Discrete_GraphicsLevel *levels = | 
| 2558 | smu_data->smc_state_table.GraphicsLevel; | 
| 2559 | uint32_t array = smu_data->smu7_data.dpm_table_start + | 
| 2560 | 			offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); | 
| 2561 | |
| 2562 | 	uint32_t mclk_array = smu_data->smu7_data.dpm_table_start + | 
| 2563 | 			offsetof(SMU73_Discrete_DpmTable, MemoryLevel); | 
| 2564 | struct SMU73_Discrete_MemoryLevel *mclk_levels = | 
| 2565 | smu_data->smc_state_table.MemoryLevel; | 
| 2566 | uint32_t i; | 
| 2567 | uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp; | 
| 2568 | |
| 2569 | 	if (profile_setting == NULL) | 
| 2570 | 		return -EINVAL; | 
| 2571 | |
| 2572 | setting = (struct profile_mode_setting *)profile_setting; | 
| 2573 | |
| 2574 | if (setting->bupdate_sclk) { | 
| 2575 | if (!data->sclk_dpm_key_disabled) | 
| 2576 | 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL); | 
| 2577 | for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { | 
| 2578 | if (levels[i].ActivityLevel != | 
| 2579 | 				cpu_to_be16(setting->sclk_activity)) { | 
| 2580 | 				levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity); | 
| 2581 | |
| 2582 | 				clk_activity_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i) | 
| 2583 | 						+ offsetof(SMU73_Discrete_GraphicsLevel, ActivityLevel); | 
| 2584 | 				offset = clk_activity_offset & ~0x3; | 
| 2585 | 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset)); | 
| 2586 | 				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t)); | 
| 2587 | 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp)); | 
| 2588 | |
| 2589 | } | 
| 2590 | if (levels[i].UpHyst != setting->sclk_up_hyst || | 
| 2591 | levels[i].DownHyst != setting->sclk_down_hyst) { | 
| 2592 | levels[i].UpHyst = setting->sclk_up_hyst; | 
| 2593 | levels[i].DownHyst = setting->sclk_down_hyst; | 
| 2594 | up_hyst_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i) | 
| 2595 | 						+ offsetof(SMU73_Discrete_GraphicsLevel, UpHyst); | 
| 2596 | 				down_hyst_offset = array + (sizeof(SMU73_Discrete_GraphicsLevel) * i) | 
| 2597 | 						+ offsetof(SMU73_Discrete_GraphicsLevel, DownHyst); | 
| 2598 | 				offset = up_hyst_offset & ~0x3; | 
| 2599 | 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset)); | 
| 2600 | 				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpHyst, sizeof(uint8_t)); | 
| 2601 | 				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownHyst, sizeof(uint8_t)); | 
| 2602 | 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp)); | 
| 2603 | } | 
| 2604 | } | 
| 2605 | if (!data->sclk_dpm_key_disabled) | 
| 2606 | 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL); | 
| 2607 | } | 
| 2608 | |
| 2609 | if (setting->bupdate_mclk) { | 
| 2610 | if (!data->mclk_dpm_key_disabled) | 
| 2611 | 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL); | 
| 2612 | for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) { | 
| 2613 | if (mclk_levels[i].ActivityLevel != | 
| 2614 | 				cpu_to_be16(setting->mclk_activity)) { | 
| 2615 | 				mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity); | 
| 2616 | |
| 2617 | 				clk_activity_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i) | 
| 2618 | 						+ offsetof(SMU73_Discrete_MemoryLevel, ActivityLevel); | 
| 2619 | 				offset = clk_activity_offset & ~0x3; | 
| 2620 | 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset)); | 
| 2621 | 				tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t)); | 
| 2622 | 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp)); | 
| 2623 | |
| 2624 | } | 
| 2625 | if (mclk_levels[i].UpHyst != setting->mclk_up_hyst || | 
| 2626 | mclk_levels[i].DownHyst != setting->mclk_down_hyst) { | 
| 2627 | mclk_levels[i].UpHyst = setting->mclk_up_hyst; | 
| 2628 | mclk_levels[i].DownHyst = setting->mclk_down_hyst; | 
| 2629 | up_hyst_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i) | 
| 2630 | 						+ offsetof(SMU73_Discrete_MemoryLevel, UpHyst); | 
| 2631 | 				down_hyst_offset = mclk_array + (sizeof(SMU73_Discrete_MemoryLevel) * i) | 
| 2632 | 						+ offsetof(SMU73_Discrete_MemoryLevel, DownHyst); | 
| 2633 | 				offset = up_hyst_offset & ~0x3; | 
| 2634 | 				tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset)); | 
| 2635 | 				tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpHyst, sizeof(uint8_t)); | 
| 2636 | 				tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownHyst, sizeof(uint8_t)); | 
| 2637 | 				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp)); | 
| 2638 | } | 
| 2639 | } | 
| 2640 | if (!data->mclk_dpm_key_disabled) | 
| 2641 | 			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL); | 
| 2642 | } | 
| 2643 | return 0; | 
| 2644 | } | 
| 2645 | |
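|  | /* SMU manager callbacks for Fiji; generic smu7 helpers are used where no | 
|  |  * Fiji-specific implementation is needed. */ | 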
| 2646 | const struct pp_smumgr_func fiji_smu_funcs = { | 
| 2647 | .name = "fiji_smu", | 
| 2648 | .smu_init = &fiji_smu_init, | 
| 2649 | .smu_fini = &smu7_smu_fini, | 
| 2650 | .start_smu = &fiji_start_smu, | 
| 2651 | .check_fw_load_finish = &smu7_check_fw_load_finish, | 
| 2652 | .request_smu_load_fw = &smu7_reload_firmware, | 
| 2653 | 	.request_smu_load_specific_fw = NULL, | 
| 2654 | .send_msg_to_smc = &smu7_send_msg_to_smc, | 
| 2655 | .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, | 
| 2656 | .get_argument = smu7_get_argument, | 
| 2657 | 	.download_pptable_settings = NULL, | 
| 2658 | 	.upload_pptable_settings = NULL, | 
| 2659 | .update_smc_table = fiji_update_smc_table, | 
| 2660 | .get_offsetof = fiji_get_offsetof, | 
| 2661 | .process_firmware_header = fiji_process_firmware_header, | 
| 2662 | .init_smc_table = fiji_init_smc_table, | 
| 2663 | .update_sclk_threshold = fiji_update_sclk_threshold, | 
| 2664 | .thermal_setup_fan_table = fiji_thermal_setup_fan_table, | 
| 2665 | .thermal_avfs_enable = fiji_thermal_avfs_enable, | 
| 2666 | .populate_all_graphic_levels = fiji_populate_all_graphic_levels, | 
| 2667 | .populate_all_memory_levels = fiji_populate_all_memory_levels, | 
| 2668 | .get_mac_definition = fiji_get_mac_definition, | 
| 2669 | .initialize_mc_reg_table = fiji_initialize_mc_reg_table, | 
| 2670 | .is_dpm_running = fiji_is_dpm_running, | 
| 2671 | .is_hw_avfs_present = fiji_is_hw_avfs_present, | 
| 2672 | .update_dpm_settings = fiji_update_dpm_settings, | 
| 2673 | }; |