| File: | dev/pci/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c | 
| Warning: | line 1657, column 2 Value stored to 'table_address' is never read | 
| 1 | /* | 
| 2 | * Copyright 2015 Advanced Micro Devices, Inc. | 
| 3 | * | 
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | 
| 5 | * copy of this software and associated documentation files (the "Software"), | 
| 6 | * to deal in the Software without restriction, including without limitation | 
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | 
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | 
| 9 | * Software is furnished to do so, subject to the following conditions: | 
| 10 | * | 
| 11 | * The above copyright notice and this permission notice shall be included in | 
| 12 | * all copies or substantial portions of the Software. | 
| 13 | * | 
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | 
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | 
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | 
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | 
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | 
| 21 | * | 
| 22 | */ | 
| 23 | #include "pp_debug.h" | 
| 24 | #include <linux/module.h> | 
| 25 | #include <linux/slab.h> | 
| 26 | #include <linux/delay.h> | 
| 27 | #include "atom.h" | 
| 28 | #include "ppatomctrl.h" | 
| 29 | #include "atombios.h" | 
| 30 | #include "cgs_common.h" | 
| 31 | #include "ppevvmath.h" | 
| 32 | |
| 33 | #define MEM_ID_MASK 0xff000000 | 
| 34 | #define MEM_ID_SHIFT 24 | 
| 35 | #define CLOCK_RANGE_MASK 0x00ffffff | 
| 36 | #define CLOCK_RANGE_SHIFT 0 | 
| 37 | #define LOW_NIBBLE_MASK 0xf | 
| 38 | #define DATA_EQU_PREV 0 | 
| 39 | #define DATA_FROM_TABLE 4 | 
| 40 | |
| 41 | union voltage_object_info { | 
| 42 | struct _ATOM_VOLTAGE_OBJECT_INFO v1; | 
| 43 | struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2; | 
| 44 | struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3; | 
| 45 | }; | 
| 46 | |
| 47 | static int atomctrl_retrieve_ac_timing( | 
| 48 | uint8_t index, | 
| 49 | ATOM_INIT_REG_BLOCK *reg_block, | 
| 50 | pp_atomctrl_mc_reg_table *table) | 
| 51 | { | 
| 52 | uint32_t i, j; | 
| 53 | uint8_t tmem_id; | 
| 54 | ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *) | 
| 55 | ((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize)); | 
| 56 | |
| 57 | uint8_t num_ranges = 0; | 
| 58 | |
| 59 | while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK && | 
| 60 | num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) { | 
| 61 | tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT); | 
| 62 | |
| 63 | if (index == tmem_id) { | 
| 64 | table->mc_reg_table_entry[num_ranges].mclk_max = | 
| 65 | (uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >> | 
| 66 | CLOCK_RANGE_SHIFT); | 
| 67 | |
| 68 | for (i = 0, j = 1; i < table->last; i++) { | 
| 69 | if ((table->mc_reg_address[i].uc_pre_reg_data & | 
| 70 | LOW_NIBBLE_MASK) == DATA_FROM_TABLE) { | 
| 71 | table->mc_reg_table_entry[num_ranges].mc_data[i] = | 
| 72 | (uint32_t)*((uint32_t *)reg_data + j); | 
| 73 | j++; | 
| 74 | } else if ((table->mc_reg_address[i].uc_pre_reg_data & | 
| 75 | LOW_NIBBLE_MASK) == DATA_EQU_PREV) { | 
| 76 | table->mc_reg_table_entry[num_ranges].mc_data[i] = | 
| 77 | table->mc_reg_table_entry[num_ranges].mc_data[i-1]; | 
| 78 | } | 
| 79 | } | 
| 80 | num_ranges++; | 
| 81 | } | 
| 82 | |
| 83 | reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *) | 
| 84 | ((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)); | 
| 85 | } | 
| 86 | |
| 87 | PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK), | 
| 88 | "Invalid VramInfo table.", return -1); | 
| 89 | table->num_entries = num_ranges; | 
| 90 | |
| 91 | return 0; | 
| 92 | } | 
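Each data block walked by the loop above starts with a packed 32-bit word: the memory module ID sits in bits 31:24 (MEM_ID_MASK / MEM_ID_SHIFT) and the maximum memory clock for that range in bits 23:0 (CLOCK_RANGE_MASK). A minimal, self-contained sketch of that decode; the helper name is hypothetical:

#include <stdint.h>

/* Hypothetical helper: split the header word the same way the loop above does. */
static void decode_ac_timing_header(uint32_t header,
                                    uint8_t *mem_id, uint32_t *mclk_max)
{
	*mem_id   = (uint8_t)((header & 0xff000000) >> 24); /* MEM_ID_MASK >> MEM_ID_SHIFT */
	*mclk_max = header & 0x00ffffff;                     /* CLOCK_RANGE_MASK */
}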
| 93 | |
| 94 | /** | 
| 95 | * atomctrl_set_mc_reg_address_table - Get memory clock AC timing registers index from VBIOS table | 
| 96 | * The VBIOS marks the end of the memory clock AC timing registers with ucPreRegDataLength bit6 = 1 | 
| 97 | * @reg_block: the address ATOM_INIT_REG_BLOCK | 
| 98 | * @table: the address of MCRegTable | 
| 99 | * Return: 0 | 
| 100 | */ | 
| 101 | static int atomctrl_set_mc_reg_address_table( | 
| 102 | ATOM_INIT_REG_BLOCK *reg_block, | 
| 103 | pp_atomctrl_mc_reg_table *table) | 
| 104 | { | 
| 105 | uint8_t i = 0; | 
| 106 | uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize)) | 
| 107 | / sizeof(ATOM_INIT_REG_INDEX_FORMAT)); | 
| 108 | ATOM_INIT_REG_INDEX_FORMAT *format = ®_block->asRegIndexBuf[0]; | 
| 109 | |
| 110 | num_entries--; /* subtract 1 data end mark entry */ | 
| 111 | |
| 112 | PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE), | 
| 113 | "Invalid VramInfo table.", return -1); | 
| 114 | |
| 115 | /* ucPreRegDataLength bit6 = 1 is the end of memory clock AC timing registers */ | 
| 116 | while ((!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) && | 
| 117 | (i < num_entries)) { | 
| 118 | table->mc_reg_address[i].s1 = | 
| 119 | (uint16_t)(le16_to_cpu(format->usRegIndex)); | 
| 120 | table->mc_reg_address[i].uc_pre_reg_data = | 
| 121 | format->ucPreRegDataLength; | 
| 122 | |
| 123 | i++; | 
| 124 | format = (ATOM_INIT_REG_INDEX_FORMAT *) | 
| 125 | ((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT)); | 
| 126 | } | 
| 127 | |
| 128 | table->last = i; | 
| 129 | return 0; | 
| 130 | } | 
| 131 | |
| 132 | int atomctrl_initialize_mc_reg_table( | 
| 133 | struct pp_hwmgr *hwmgr, | 
| 134 | uint8_t module_index, | 
| 135 | pp_atomctrl_mc_reg_table *table) | 
| 136 | { | 
| 137 | ATOM_VRAM_INFO_HEADER_V2_1 *vram_info; | 
| 138 | ATOM_INIT_REG_BLOCK *reg_block; | 
| 139 | int result = 0; | 
| 140 | u8 frev, crev; | 
| 141 | u16 size; | 
| 142 | |
| 143 | vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *) | 
| 144 | smu_atom_get_data_table(hwmgr->adev, | 
| 145 | GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); | 
| 146 | |
| 147 | if (module_index >= vram_info->ucNumOfVRAMModule) { | 
| 148 | pr_err("Invalid VramInfo table."); | 
| 149 | result = -1; | 
| 150 | } else if (vram_info->sHeader.ucTableFormatRevision < 2) { | 
| 151 | pr_err("Invalid VramInfo table."); | 
| 152 | result = -1; | 
| 153 | } | 
| 154 | |
| 155 | if (0 == result) { | 
| 156 | reg_block = (ATOM_INIT_REG_BLOCK *) | 
| 157 | ((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset)); | 
| 158 | result = atomctrl_set_mc_reg_address_table(reg_block, table); | 
| 159 | } | 
| 160 | |
| 161 | if (0 == result) { | 
| 162 | result = atomctrl_retrieve_ac_timing(module_index, | 
| 163 | reg_block, table); | 
| 164 | } | 
| 165 | |
| 166 | return result; | 
| 167 | } | 
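A hedged usage sketch for the helper above; the surrounding driver context (a valid hwmgr) and the choice of VRAM module 0 are assumptions, not taken from this file:

/* Sketch: build the MC register table for VRAM module 0 and report how many
 * AC timing ranges were found; the function returns -1 on a malformed table. */
static int example_load_mc_reg_table(struct pp_hwmgr *hwmgr)
{
	pp_atomctrl_mc_reg_table table = {0};

	if (atomctrl_initialize_mc_reg_table(hwmgr, 0, &table))
		return -1;

	return table.num_entries;
}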
| 168 | |
| 169 | int atomctrl_initialize_mc_reg_table_v2_2( | 
| 170 | struct pp_hwmgr *hwmgr, | 
| 171 | uint8_t module_index, | 
| 172 | pp_atomctrl_mc_reg_table *table) | 
| 173 | { | 
| 174 | ATOM_VRAM_INFO_HEADER_V2_2 *vram_info; | 
| 175 | ATOM_INIT_REG_BLOCK *reg_block; | 
| 176 | int result = 0; | 
| 177 | u8 frev, crev; | 
| 178 | u16 size; | 
| 179 | |
| 180 | vram_info = (ATOM_VRAM_INFO_HEADER_V2_2 *) | 
| 181 | smu_atom_get_data_table(hwmgr->adev, | 
| 182 | GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); | 
| 183 | |
| 184 | if (module_index >= vram_info->ucNumOfVRAMModule) { | 
| 185 | pr_err("Invalid VramInfo table."); | 
| 186 | result = -1; | 
| 187 | } else if (vram_info->sHeader.ucTableFormatRevision < 2) { | 
| 188 | pr_err("Invalid VramInfo table."); | 
| 189 | result = -1; | 
| 190 | } | 
| 191 | |
| 192 | if (0 == result) { | 
| 193 | reg_block = (ATOM_INIT_REG_BLOCK *) | 
| 194 | ((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset)); | 
| 195 | result = atomctrl_set_mc_reg_address_table(reg_block, table); | 
| 196 | } | 
| 197 | |
| 198 | if (0 == result) { | 
| 199 | result = atomctrl_retrieve_ac_timing(module_index, | 
| 200 | reg_block, table); | 
| 201 | } | 
| 202 | |
| 203 | return result; | 
| 204 | } | 
| 205 | |
| 206 | /* | 
| 207 | * Set DRAM timings based on engine clock and memory clock. | 
| 208 | */ | 
| 209 | int atomctrl_set_engine_dram_timings_rv770( | 
| 210 | struct pp_hwmgr *hwmgr, | 
| 211 | uint32_t engine_clock, | 
| 212 | uint32_t memory_clock) | 
| 213 | { | 
| 214 | struct amdgpu_device *adev = hwmgr->adev; | 
| 215 | |
| 216 | SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters; | 
| 217 | |
| 218 | /* They are both in 10KHz Units. */ | 
| 219 | engine_clock_parameters.ulTargetEngineClock = | 
| 220 | cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) | | 
| 221 | ((COMPUTE_ENGINE_PLL_PARAM << 24))); | 
| 222 | |
| 223 | /* in 10 khz units.*/ | 
| 224 | engine_clock_parameters.sReserved.ulClock = | 
| 225 | cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); | 
| 226 | |
| 227 | return amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 228 | GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), | 
| 229 | (uint32_t *)&engine_clock_parameters); | 
| 230 | } | 
| 231 | |
| 232 | /* | 
| 233 | * Private function to get the voltage object info table address. | 
| 234 | * WARNING: The table returned by this function is in | 
| 235 | * dynamically allocated memory. | 
| 236 | * The caller has to release it by calling kfree. | 
| 237 | */ | 
| 238 | static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device) | 
| 239 | { | 
| 240 | int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo); | 
| 241 | u8 frev, crev; | 
| 242 | u16 size; | 
| 243 | union voltage_object_info *voltage_info; | 
| 244 | |
| 245 | voltage_info = (union voltage_object_info *) | 
| 246 | smu_atom_get_data_table(device, index, | 
| 247 | &size, &frev, &crev); | 
| 248 | |
| 249 | if (voltage_info != NULL) | 
| 250 | return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3); | 
| 251 | else | 
| 252 | return NULL; | 
| 253 | } | 
| 254 | |
| 255 | static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3( | 
| 256 | const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table, | 
| 257 | uint8_t voltage_type, uint8_t voltage_mode) | 
| 258 | { | 
| 259 | unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize); | 
| 260 | unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]); | 
| 261 | uint8_t *start = (uint8_t *)voltage_object_info_table; | 
| 262 | |
| 263 | while (offset < size) { | 
| 264 | const ATOM_VOLTAGE_OBJECT_V3 *voltage_object = | 
| 265 | (const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset); | 
| 266 | |
| 267 | if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType && | 
| 268 | voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode) | 
| 269 | return voltage_object; | 
| 270 | |
| 271 | offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize); | 
| 272 | } | 
| 273 | |
| 274 | return NULL; | 
| 275 | } | 
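The lookup above walks variable-length records: each voltage object carries its own usSize, so the cursor advances by that amount until the table's usStructureSize is exhausted. A self-contained sketch of the same pattern with simplified, hypothetical record types:

#include <stddef.h>
#include <stdint.h>

struct record_hdr { uint8_t type; uint8_t mode; uint16_t size; };  /* hypothetical */

/* Walk packed records of varying size and return the first type/mode match. */
static const struct record_hdr *find_record(const uint8_t *table, size_t table_size,
                                            size_t first_off, uint8_t type, uint8_t mode)
{
	size_t off = first_off;

	while (off + sizeof(struct record_hdr) <= table_size) {
		const struct record_hdr *r = (const struct record_hdr *)(table + off);

		if (r->type == type && r->mode == mode)
			return r;
		if (r->size == 0)	/* guard against a malformed entry */
			break;
		off += r->size;
	}
	return NULL;
}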
| 276 | |
| 277 | /** | 
| 278 | * atomctrl_get_memory_pll_dividers_si | 
| 279 | * | 
| 280 | * @hwmgr: input parameter: pointer to HwMgr | 
| 281 | * @clock_value: input parameter: memory clock | 
| 282 | * @mpll_param: output parameter: memory clock parameters | 
| 283 | * @strobe_mode: input parameter: 1 for strobe mode, 0 for performance mode | 
| 284 | */ | 
| 285 | int atomctrl_get_memory_pll_dividers_si( | 
| 286 | struct pp_hwmgr *hwmgr, | 
| 287 | uint32_t clock_value, | 
| 288 | pp_atomctrl_memory_clock_param *mpll_param, | 
| 289 | bool strobe_mode) | 
| 290 | { | 
| 291 | struct amdgpu_device *adev = hwmgr->adev; | 
| 292 | COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; | 
| 293 | int result; | 
| 294 | |
| 295 | mpll_parameters.ulClock = cpu_to_le32(clock_value); | 
| 296 | mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0); | 
| 297 | |
| 298 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 299 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), | 
| 300 | (uint32_t *)&mpll_parameters); | 
| 301 | |
| 302 | if (0 == result) { | 
| 303 | mpll_param->mpll_fb_divider.clk_frac = | 
| 304 | le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac); | 
| 305 | mpll_param->mpll_fb_divider.cl_kf = | 
| 306 | le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv); | 
| 307 | mpll_param->mpll_post_divider = | 
| 308 | (uint32_t)mpll_parameters.ucPostDiv; | 
| 309 | mpll_param->vco_mode = | 
| 310 | (uint32_t)(mpll_parameters.ucPllCntlFlag & | 
| 311 | MPLL_CNTL_FLAG_VCO_MODE_MASK); | 
| 312 | mpll_param->yclk_sel = | 
| 313 | (uint32_t)((mpll_parameters.ucPllCntlFlag & | 
| 314 | MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0); | 
| 315 | mpll_param->qdr = | 
| 316 | (uint32_t)((mpll_parameters.ucPllCntlFlag & | 
| 317 | MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0); | 
| 318 | mpll_param->half_rate = | 
| 319 | (uint32_t)((mpll_parameters.ucPllCntlFlag & | 
| 320 | MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0); | 
| 321 | mpll_param->dll_speed = | 
| 322 | (uint32_t)(mpll_parameters.ucDllSpeed); | 
| 323 | mpll_param->bw_ctrl = | 
| 324 | (uint32_t)(mpll_parameters.ucBWCntl); | 
| 325 | } | 
| 326 | |
| 327 | return result; | 
| 328 | } | 
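A hedged usage sketch; the 10 kHz clock units follow the convention stated elsewhere in this file, and the example frequency is an assumption:

/* Sketch: query MPLL settings for a 1500 MHz memory clock (150000 * 10 kHz)
 * in performance (non-strobe) mode. */
static int example_query_mpll_si(struct pp_hwmgr *hwmgr)
{
	pp_atomctrl_memory_clock_param mpll = {0};

	if (atomctrl_get_memory_pll_dividers_si(hwmgr, 150000, &mpll, false))
		return -1;

	return (int)mpll.mpll_post_divider;
}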
| 329 | |
| 330 | /** | 
| 331 | * atomctrl_get_memory_pll_dividers_vi | 
| 332 | * | 
| 333 | * @hwmgr: input parameter: pointer to HwMgr | 
| 334 | * @clock_value: input parameter: memory clock | 
| 335 | * @mpll_param: output parameter: memory clock parameters | 
| 336 | */ | 
| 337 | int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, | 
| 338 | uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param) | 
| 339 | { | 
| 340 | struct amdgpu_device *adev = hwmgr->adev; | 
| 341 | COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; | 
| 342 | int result; | 
| 343 | |
| 344 | mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); | 
| 345 | |
| 346 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 347 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), | 
| 348 | (uint32_t *)&mpll_parameters); | 
| 349 | |
| 350 | if (!result) | 
| 351 | mpll_param->mpll_post_divider = | 
| 352 | (uint32_t)mpll_parameters.ulClock.ucPostDiv; | 
| 353 | |
| 354 | return result; | 
| 355 | } | 
| 356 | |
| 357 | int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr, | 
| 358 | uint32_t clock_value, | 
| 359 | pp_atomctrl_memory_clock_param_ai *mpll_param) | 
| 360 | { | 
| 361 | struct amdgpu_device *adev = hwmgr->adev; | 
| 362 | COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0}; | 
| 363 | int result; | 
| 364 | |
| 365 | mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); | 
| 366 | |
| 367 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 368 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), | 
| 369 | (uint32_t *)&mpll_parameters); | 
| 370 | |
| 371 | /* VEGAM's mpll takes sometime to finish computing */ | 
| 372 | udelay(10); | 
| 373 | |
| 374 | if (!result) { | 
| 375 | mpll_param->ulMclk_fcw_int = | 
| 376 | le16_to_cpu(mpll_parameters.usMclk_fcw_int); | 
| 377 | mpll_param->ulMclk_fcw_frac = | 
| 378 | le16_to_cpu(mpll_parameters.usMclk_fcw_frac); | 
| 379 | mpll_param->ulClock = | 
| 380 | le32_to_cpu(mpll_parameters.ulClock.ulClock); | 
| 381 | mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv; | 
| 382 | } | 
| 383 | |
| 384 | return result; | 
| 385 | } | 
| 386 | |
| 387 | int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, | 
| 388 | uint32_t clock_value, | 
| 389 | pp_atomctrl_clock_dividers_kong *dividers) | 
| 390 | { | 
| 391 | struct amdgpu_device *adev = hwmgr->adev; | 
| 392 | COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; | 
| 393 | int result; | 
| 394 | |
| 395 | pll_parameters.ulClock = cpu_to_le32(clock_value); | 
| 396 | |
| 397 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 398 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), | 
| 399 | (uint32_t *)&pll_parameters); | 
| 400 | |
| 401 | if (0 == result) { | 
| 402 | dividers->pll_post_divider = pll_parameters.ucPostDiv; | 
| 403 | dividers->real_clock = le32_to_cpu(pll_parameters.ulClock); | 
| 404 | } | 
| 405 | |
| 406 | return result; | 
| 407 | } | 
| 408 | |
| 409 | int atomctrl_get_engine_pll_dividers_vi( | 
| 410 | struct pp_hwmgr *hwmgr, | 
| 411 | uint32_t clock_value, | 
| 412 | pp_atomctrl_clock_dividers_vi *dividers) | 
| 413 | { | 
| 414 | struct amdgpu_device *adev = hwmgr->adev; | 
| 415 | COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; | 
| 416 | int result; | 
| 417 | |
| 418 | pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); | 
| 419 | pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; | 
| 420 | |
| 421 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 422 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), | 
| 423 | (uint32_t *)&pll_patameters); | 
| 424 | |
| 425 | if (0 == result) { | 
| 426 | dividers->pll_post_divider = | 
| 427 | pll_patameters.ulClock.ucPostDiv; | 
| 428 | dividers->real_clock = | 
| 429 | le32_to_cpu(pll_patameters.ulClock.ulClock); | 
| 430 | |
| 431 | dividers->ul_fb_div.ul_fb_div_frac = | 
| 432 | le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); | 
| 433 | dividers->ul_fb_div.ul_fb_div = | 
| 434 | le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); | 
| 435 | |
| 436 | dividers->uc_pll_ref_div = | 
| 437 | pll_patameters.ucPllRefDiv; | 
| 438 | dividers->uc_pll_post_div = | 
| 439 | pll_patameters.ucPllPostDiv; | 
| 440 | dividers->uc_pll_cntl_flag = | 
| 441 | pll_patameters.ucPllCntlFlag; | 
| 442 | } | 
| 443 | |
| 444 | return result; | 
| 445 | } | 
| 446 | |
| 447 | int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, | 
| 448 | uint32_t clock_value, | 
| 449 | pp_atomctrl_clock_dividers_ai *dividers) | 
| 450 | { | 
| 451 | struct amdgpu_device *adev = hwmgr->adev; | 
| 452 | COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters; | 
| 453 | int result; | 
| 454 | |
| 455 | pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); | 
| 456 | pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; | 
| 457 | |
| 458 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 459 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), | 
| 460 | (uint32_t *)&pll_patameters); | 
| 461 | |
| 462 | if (0 == result) { | 
| 463 | dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac); | 
| 464 | dividers->usSclk_fcw_int = le16_to_cpu(pll_patameters.usSclk_fcw_int); | 
| 465 | dividers->ucSclkPostDiv = pll_patameters.ucSclkPostDiv; | 
| 466 | dividers->ucSclkVcoMode = pll_patameters.ucSclkVcoMode; | 
| 467 | dividers->ucSclkPllRange = pll_patameters.ucSclkPllRange; | 
| 468 | dividers->ucSscEnable = pll_patameters.ucSscEnable; | 
| 469 | dividers->usSsc_fcw1_frac = le16_to_cpu(pll_patameters.usSsc_fcw1_frac); | 
| 470 | dividers->usSsc_fcw1_int = le16_to_cpu(pll_patameters.usSsc_fcw1_int); | 
| 471 | dividers->usPcc_fcw_int = le16_to_cpu(pll_patameters.usPcc_fcw_int); | 
| 472 | dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_patameters.usSsc_fcw_slew_frac); | 
| 473 | dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_patameters.usPcc_fcw_slew_frac); | 
| 474 | } | 
| 475 | return result; | 
| 476 | } | 
| 477 | |
| 478 | int atomctrl_get_dfs_pll_dividers_vi( | 
| 479 | struct pp_hwmgr *hwmgr, | 
| 480 | uint32_t clock_value, | 
| 481 | pp_atomctrl_clock_dividers_vi *dividers) | 
| 482 | { | 
| 483 | struct amdgpu_device *adev = hwmgr->adev; | 
| 484 | COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; | 
| 485 | int result; | 
| 486 | |
| 487 | pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); | 
| 488 | pll_patameters.ulClock.ucPostDiv = | 
| 489 | COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; | 
| 490 | |
| 491 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 492 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), | 
| 493 | (uint32_t *)&pll_patameters); | 
| 494 | |
| 495 | if (0 == result) { | 
| 496 | dividers->pll_post_divider = | 
| 497 | pll_patameters.ulClock.ucPostDiv; | 
| 498 | dividers->real_clock = | 
| 499 | le32_to_cpu(pll_patameters.ulClock.ulClock); | 
| 500 | |
| 501 | dividers->ul_fb_div.ul_fb_div_frac = | 
| 502 | le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); | 
| 503 | dividers->ul_fb_div.ul_fb_div = | 
| 504 | le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); | 
| 505 | |
| 506 | dividers->uc_pll_ref_div = | 
| 507 | pll_patameters.ucPllRefDiv; | 
| 508 | dividers->uc_pll_post_div = | 
| 509 | pll_patameters.ucPllPostDiv; | 
| 510 | dividers->uc_pll_cntl_flag = | 
| 511 | pll_patameters.ucPllCntlFlag; | 
| 512 | } | 
| 513 | |
| 514 | return result; | 
| 515 | } | 
| 516 | |
| 517 | /* | 
| 518 | * Get the reference clock in 10KHz | 
| 519 | */ | 
| 520 | uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr) | 
| 521 | { | 
| 522 | ATOM_FIRMWARE_INFO *fw_info; | 
| 523 | u8 frev, crev; | 
| 524 | u16 size; | 
| 525 | uint32_t clock; | 
| 526 | |
| 527 | fw_info = (ATOM_FIRMWARE_INFO *) | 
| 528 | smu_atom_get_data_table(hwmgr->adev, | 
| 529 | GetIndexIntoMasterTable(DATA, FirmwareInfo), | 
| 530 | &size, &frev, &crev); | 
| 531 | |
| 532 | if (fw_info == NULL) | 
| 533 | clock = 2700; | 
| 534 | else | 
| 535 | clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock)); | 
| 536 | |
| 537 | return clock; | 
| 538 | } | 
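The returned value is in 10 kHz units, with 2700 (27 MHz) as the fallback when FirmwareInfo is missing. A short sketch of the unit conversion a caller would typically do:

/* Sketch: convert the 10 kHz reference clock into kHz and MHz. */
uint32_t ref_10khz = atomctrl_get_reference_clock(hwmgr); /* e.g. 2700 */
uint32_t ref_khz   = ref_10khz * 10;                      /* 27000 kHz */
uint32_t ref_mhz   = ref_10khz / 100;                     /* 27 MHz    */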
| 539 | |
| 540 | /* | 
| 541 | * Returns true if the given voltage type is controlled by GPIO pins. | 
| 542 | * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC, | 
| 543 | * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ. | 
| 544 | * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE | 
| 545 | */ | 
| 546 | bool atomctrl_is_voltage_controlled_by_gpio_v3( | 
| 547 | struct pp_hwmgr *hwmgr, | 
| 548 | uint8_t voltage_type, | 
| 549 | uint8_t voltage_mode) | 
| 550 | { | 
| 551 | ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = | 
| 552 | (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); | 
| 553 | bool ret; | 
| 554 | |
| 555 | PP_ASSERT_WITH_CODE((NULL != voltage_info), | 
| 556 | "Could not find Voltage Table in BIOS.", return false;); | 
| 557 | |
| 558 | ret = (NULL != atomctrl_lookup_voltage_type_v3 | 
| 559 | (voltage_info, voltage_type, voltage_mode)) ? true : false; | 
| 560 | |
| 561 | return ret; | 
| 562 | } | 
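A hedged usage sketch using the type/mode constants named in the comment above; whether a given board actually exposes GPIO-controlled VDDC is hardware dependent:

/* Sketch: only take the GPIO voltage path when the VBIOS advertises it. */
if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
		SET_VOLTAGE_TYPE_ASIC_VDDC, ATOM_SET_VOLTAGE)) {
	/* ... fetch the table with atomctrl_get_voltage_table_v3() ... */
}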
| 563 | |
| 564 | int atomctrl_get_voltage_table_v3( | 
| 565 | struct pp_hwmgr *hwmgr, | 
| 566 | uint8_t voltage_type, | 
| 567 | uint8_t voltage_mode, | 
| 568 | pp_atomctrl_voltage_table *voltage_table) | 
| 569 | { | 
| 570 | ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = | 
| 571 | (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); | 
| 572 | const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; | 
| 573 | unsigned int i; | 
| 574 | |
| 575 | PP_ASSERT_WITH_CODE((NULL != voltage_info), | 
| 576 | "Could not find Voltage Table in BIOS.", return -1;); | 
| 577 | |
| 578 | voltage_object = atomctrl_lookup_voltage_type_v3 | 
| 579 | (voltage_info, voltage_type, voltage_mode); | 
| 580 | |
| 581 | if (voltage_object == NULL) | 
| 582 | return -1; | 
| 583 | |
| 584 | PP_ASSERT_WITH_CODE( | 
| 585 | (voltage_object->asGpioVoltageObj.ucGpioEntryNum <= | 
| 586 | PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES), | 
| 587 | "Too many voltage entries!", | 
| 588 | return -1; | 
| 589 | ); | 
| 590 | |
| 591 | for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) { | 
| 592 | voltage_table->entries[i].value = | 
| 593 | le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue); | 
| 594 | voltage_table->entries[i].smio_low = | 
| 595 | le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId); | 
| 596 | } | 
| 597 | |
| 598 | voltage_table->mask_low = | 
| 599 | le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal); | 
| 600 | voltage_table->count = | 
| 601 | voltage_object->asGpioVoltageObj.ucGpioEntryNum; | 
| 602 | voltage_table->phase_delay = | 
| 603 | voltage_object->asGpioVoltageObj.ucPhaseDelay; | 
| 604 | |
| 605 | return 0; | 
| 606 | } | 
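A hedged sketch of fetching and walking the resulting table, using the same constants as above; the meaning of each field follows the assignments in the function:

/* Sketch: read the GPIO VDDC table and inspect its entries. */
pp_atomctrl_voltage_table vddc_table = {0};
uint32_t i;

if (!atomctrl_get_voltage_table_v3(hwmgr, SET_VOLTAGE_TYPE_ASIC_VDDC,
		ATOM_SET_VOLTAGE, &vddc_table)) {
	for (i = 0; i < vddc_table.count; i++)
		; /* vddc_table.entries[i].value and .smio_low are filled from the VBIOS LUT */
}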
| 607 | |
| 608 | static bool atomctrl_lookup_gpio_pin( | 
| 609 | ATOM_GPIO_PIN_LUT * gpio_lookup_table, | 
| 610 | const uint32_t pinId, | 
| 611 | pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) | 
| 612 | { | 
| 613 | unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize); | 
| 614 | unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]); | 
| 615 | uint8_t *start = (uint8_t *)gpio_lookup_table; | 
| 616 | |
| 617 | while (offset < size) { | 
| 618 | const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment = | 
| 619 | (const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset); | 
| 620 | |
| 621 | if (pinId == pin_assignment->ucGPIO_ID) { | 
| 622 | gpio_pin_assignment->uc_gpio_pin_bit_shift = | 
| 623 | pin_assignment->ucGpioPinBitShift; | 
| 624 | gpio_pin_assignment->us_gpio_pin_aindex = | 
| 625 | le16_to_cpu(pin_assignment->usGpioPin_AIndex); | 
| 626 | return true; | 
| 627 | } | 
| 628 | |
| 629 | offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1; | 
| 630 | } | 
| 631 | |
| 632 | return false; | 
| 633 | } | 
| 634 | |
| 635 | /* | 
| 636 | * Private function to get the GPIO pin lookup table address. | 
| 637 | * WARNING: The table returned by this function is in | 
| 638 | * dynamically allocated memory. | 
| 639 | * The caller has to release it by calling kfree. | 
| 640 | */ | 
| 641 | static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device) | 
| 642 | { | 
| 643 | u8 frev, crev; | 
| 644 | u16 size; | 
| 645 | void *table_address; | 
| 646 | |
| 647 | table_address = (ATOM_GPIO_PIN_LUT *) | 
| 648 | smu_atom_get_data_table(device, | 
| 649 | GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT), | 
| 650 | &size, &frev, &crev); | 
| 651 | |
| 652 | PP_ASSERT_WITH_CODE((NULL != table_address), | 
| 653 | "Error retrieving BIOS Table Address!", return NULL;); | 
| 654 | |
| 655 | return (ATOM_GPIO_PIN_LUT *)table_address; | 
| 656 | } | 
| 657 | |
| 658 | /* | 
| 659 | * Returns true if the given pin id is found in the lookup table. | 
| 660 | */ | 
| 661 | bool atomctrl_get_pp_assign_pin( | 
| 662 | struct pp_hwmgr *hwmgr, | 
| 663 | const uint32_t pinId, | 
| 664 | pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) | 
| 665 | { | 
| 666 | bool bRet = false; | 
| 667 | ATOM_GPIO_PIN_LUT *gpio_lookup_table = | 
| 668 | get_gpio_lookup_table(hwmgr->adev); | 
| 669 | |
| 670 | PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), | 
| 671 | "Could not find GPIO lookup Table in BIOS.", return false); | 
| 672 | |
| 673 | bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId, | 
| 674 | gpio_pin_assignment); | 
| 675 | |
| 676 | return bRet; | 
| 677 | } | 
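A hedged usage sketch; EXAMPLE_PIN_ID is a hypothetical constant standing in for whatever pin id the board design defines:

/* Sketch: look up a GPIO pin assignment by id. */
pp_atomctrl_gpio_pin_assignment pin;

if (atomctrl_get_pp_assign_pin(hwmgr, EXAMPLE_PIN_ID, &pin)) {
	/* pin.us_gpio_pin_aindex and pin.uc_gpio_pin_bit_shift are now valid */
}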
| 678 | |
| 679 | int atomctrl_calculate_voltage_evv_on_sclk( | 
| 680 | struct pp_hwmgr *hwmgr, | 
| 681 | uint8_t voltage_type, | 
| 682 | uint32_t sclk, | 
| 683 | uint16_t virtual_voltage_Id, | 
| 684 | uint16_t *voltage, | 
| 685 | uint16_t dpm_level, | 
| 686 | bool debug) | 
| 687 | { | 
| 688 | ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo; | 
| 689 | struct amdgpu_device *adev = hwmgr->adev; | 
| 690 | EFUSE_LINEAR_FUNC_PARAM sRO_fuse; | 
| 691 | EFUSE_LINEAR_FUNC_PARAM sCACm_fuse; | 
| 692 | EFUSE_LINEAR_FUNC_PARAM sCACb_fuse; | 
| 693 | EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse; | 
| 694 | EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse; | 
| 695 | EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse; | 
| 696 | EFUSE_INPUT_PARAMETER sInput_FuseValues; | 
| 697 | READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues; | 
| 698 | |
| 699 | uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused; | 
| 700 | fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7; | 
| 701 | fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma; | 
| 702 | fInt fLkg_FT, repeat; | 
| 703 | fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX; | 
| 704 | fInt fRLL_LoadLine, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin; | 
| 705 | fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM; | 
| 706 | fInt fSclk_margin, fSclk, fEVV_V; | 
| 707 | fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL; | 
| 708 | uint32_t ul_FT_Lkg_V0NORM; | 
| 709 | fInt fLn_MaxDivMin, fMin, fAverage, fRange; | 
| 710 | fInt fRoots[2]; | 
| 711 | fInt fStepSize = GetScaledFraction(625, 100000); | 
| 712 | |
| 713 | int result; | 
| 714 | |
| 715 | getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *) | 
| 716 | smu_atom_get_data_table(hwmgr->adev, | 
| 717 | GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), | 
| 718 | NULL, NULL, NULL); | 
| 719 | |
| 720 | if (!getASICProfilingInfo) | 
| 721 | return -1; | 
| 722 | |
| 723 | if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || | 
| 724 | (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && | 
| 725 | getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) | 
| 726 | return -1; | 
| 727 | |
| 728 | /*----------------------------------------------------------- | 
| 729 | *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL | 
| 730 | *----------------------------------------------------------- | 
| 731 | */ | 
| 732 | fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000); | 
| 733 | |
| 734 | switch (dpm_level) { | 
| 735 | case 1: | 
| 736 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1)((__uint32_t)(getASICProfilingInfo->ulTdpDerateDPM1)), 1000); | 
| 737 | break; | 
| 738 | case 2: | 
| 739 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2)((__uint32_t)(getASICProfilingInfo->ulTdpDerateDPM2)), 1000); | 
| 740 | break; | 
| 741 | case 3: | 
| 742 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3)((__uint32_t)(getASICProfilingInfo->ulTdpDerateDPM3)), 1000); | 
| 743 | break; | 
| 744 | case 4: | 
| 745 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4)((__uint32_t)(getASICProfilingInfo->ulTdpDerateDPM4)), 1000); | 
| 746 | break; | 
| 747 | case 5: | 
| 748 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5)((__uint32_t)(getASICProfilingInfo->ulTdpDerateDPM5)), 1000); | 
| 749 | break; | 
| 750 | case 6: | 
| 751 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6)((__uint32_t)(getASICProfilingInfo->ulTdpDerateDPM6)), 1000); | 
| 752 | break; | 
| 753 | case 7: | 
| 754 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7)((__uint32_t)(getASICProfilingInfo->ulTdpDerateDPM7)), 1000); | 
| 755 | break; | 
| 756 | default: | 
| 757 | pr_err("DPM Level not supported\n")printk("\0013" "amdgpu: " "DPM Level not supported\n"); | 
| 758 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0)((__uint32_t)(getASICProfilingInfo->ulTdpDerateDPM0)), 1000); | 
| 759 | } | 
| 760 | |
| 761 | /*------------------------- | 
| 762 | * DECODING FUSE VALUES | 
| 763 | * ------------------------ | 
| 764 | */ | 
| 765 | /*Decode RO_Fused*/ | 
| 766 | sRO_fuse = getASICProfilingInfo->sRoFuse; | 
| 767 | |
| 768 | sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex; | 
| 769 | sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB; | 
| 770 | sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength; | 
| 771 | |
| 772 | sOutput_FuseValues.sEfuse = sInput_FuseValues; | 
| 773 | |
| 774 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 775 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue)(__builtin_offsetof(ATOM_MASTER_LIST_OF_COMMAND_TABLES, ReadEfuseValue ) / sizeof(USHORT)), | 
| 776 | (uint32_t *)&sOutput_FuseValues); | 
| 777 | |
| 778 | if (result) | 
| 779 | return result; | 
| 780 | |
| 781 | /* Finally, the actual fuse value */ | 
| 782 | ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue)((__uint32_t)(sOutput_FuseValues.ulEfuseValue)); | 
| 783 | fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin)((__uint32_t)(sRO_fuse.ulEfuseMin)), 1); | 
| 784 | fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange)((__uint32_t)(sRO_fuse.ulEfuseEncodeRange)), 1); | 
| 785 | fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); | 
| 786 | |
| 787 | sCACm_fuse = getASICProfilingInfo->sCACm; | 
| 788 | |
| 789 | sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex; | 
| 790 | sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB; | 
| 791 | sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength; | 
| 792 | |
| 793 | sOutput_FuseValues.sEfuse = sInput_FuseValues; | 
| 794 | |
| 795 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 796 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue)(__builtin_offsetof(ATOM_MASTER_LIST_OF_COMMAND_TABLES, ReadEfuseValue ) / sizeof(USHORT)), | 
| 797 | (uint32_t *)&sOutput_FuseValues); | 
| 798 | |
| 799 | if (result) | 
| 800 | return result; | 
| 801 | |
| 802 | ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue)((__uint32_t)(sOutput_FuseValues.ulEfuseValue)); | 
| 803 | fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin)((__uint32_t)(sCACm_fuse.ulEfuseMin)), 1000); | 
| 804 | fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange)((__uint32_t)(sCACm_fuse.ulEfuseEncodeRange)), 1000); | 
| 805 | |
| 806 | fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); | 
| 807 | |
| 808 | sCACb_fuse = getASICProfilingInfo->sCACb; | 
| 809 | |
| 810 | sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex; | 
| 811 | sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB; | 
| 812 | sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength; | 
| 813 | sOutput_FuseValues.sEfuse = sInput_FuseValues; | 
| 814 | |
| 815 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 816 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue)(__builtin_offsetof(ATOM_MASTER_LIST_OF_COMMAND_TABLES, ReadEfuseValue ) / sizeof(USHORT)), | 
| 817 | (uint32_t *)&sOutput_FuseValues); | 
| 818 | |
| 819 | if (result) | 
| 820 | return result; | 
| 821 | |
| 822 | ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue)((__uint32_t)(sOutput_FuseValues.ulEfuseValue)); | 
| 823 | fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin)((__uint32_t)(sCACb_fuse.ulEfuseMin)), 1000); | 
| 824 | fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange)((__uint32_t)(sCACb_fuse.ulEfuseEncodeRange)), 1000); | 
| 825 | |
| 826 | fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); | 
| 827 | |
| 828 | sKt_Beta_fuse = getASICProfilingInfo->sKt_b; | 
| 829 | |
| 830 | sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex; | 
| 831 | sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB; | 
| 832 | sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength; | 
| 833 | |
| 834 | sOutput_FuseValues.sEfuse = sInput_FuseValues; | 
| 835 | |
| 836 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 837 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue)(__builtin_offsetof(ATOM_MASTER_LIST_OF_COMMAND_TABLES, ReadEfuseValue ) / sizeof(USHORT)), | 
| 838 | (uint32_t *)&sOutput_FuseValues); | 
| 839 | |
| 840 | if (result) | 
| 841 | return result; | 
| 842 | |
| 843 | ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue)((__uint32_t)(sOutput_FuseValues.ulEfuseValue)); | 
| 844 | fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage)((__uint32_t)(sKt_Beta_fuse.ulEfuseEncodeAverage)), 1000); | 
| 845 | fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange)((__uint32_t)(sKt_Beta_fuse.ulEfuseEncodeRange)), 1000); | 
| 846 | |
| 847 | fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused, | 
| 848 | fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); | 
| 849 | |
| 850 | sKv_m_fuse = getASICProfilingInfo->sKv_m; | 
| 851 | |
| 852 | sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex; | 
| 853 | sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB; | 
| 854 | sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength; | 
| 855 | |
| 856 | sOutput_FuseValues.sEfuse = sInput_FuseValues; | 
| 857 | |
| 858 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 859 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue)(__builtin_offsetof(ATOM_MASTER_LIST_OF_COMMAND_TABLES, ReadEfuseValue ) / sizeof(USHORT)), | 
| 860 | (uint32_t *)&sOutput_FuseValues); | 
| 861 | if (result) | 
| 862 | return result; | 
| 863 | |
| 864 | ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue)((__uint32_t)(sOutput_FuseValues.ulEfuseValue)); | 
| 865 | fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage)((__uint32_t)(sKv_m_fuse.ulEfuseEncodeAverage)), 1000); | 
| 866 | fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange)((__uint32_t)(sKv_m_fuse.ulEfuseEncodeRange)) & 0x7fffffff), 1000); | 
| 867 | fRange = fMultiply(fRange, ConvertToFraction(-1)); | 
| 868 | |
| 869 | fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, | 
| 870 | fAverage, fRange, sKv_m_fuse.ucEfuseLength); | 
| 871 | |
| 872 | sKv_b_fuse = getASICProfilingInfo->sKv_b; | 
| 873 | |
| 874 | sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex; | 
| 875 | sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB; | 
| 876 | sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength; | 
| 877 | sOutput_FuseValues.sEfuse = sInput_FuseValues; | 
| 878 | |
| 879 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 880 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue)(__builtin_offsetof(ATOM_MASTER_LIST_OF_COMMAND_TABLES, ReadEfuseValue ) / sizeof(USHORT)), | 
| 881 | (uint32_t *)&sOutput_FuseValues); | 
| 882 | |
| 883 | if (result) | 
| 884 | return result; | 
| 885 | |
| 886 | ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue)((__uint32_t)(sOutput_FuseValues.ulEfuseValue)); | 
| 887 | fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage)((__uint32_t)(sKv_b_fuse.ulEfuseEncodeAverage)), 1000); | 
| 888 | fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange)((__uint32_t)(sKv_b_fuse.ulEfuseEncodeRange)), 1000); | 
| 889 | |
| 890 | fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, | 
| 891 | fAverage, fRange, sKv_b_fuse.ucEfuseLength); | 
| 892 | |
| 893 | /* Decoding the Leakage - No special struct container */ | 
| 894 | /* | 
| 895 | * usLkgEuseIndex=56 | 
| 896 | * ucLkgEfuseBitLSB=6 | 
| 897 | * ucLkgEfuseLength=10 | 
| 898 | * ulLkgEncodeLn_MaxDivMin=69077 | 
| 899 | * ulLkgEncodeMax=1000000 | 
| 900 | * ulLkgEncodeMin=1000 | 
| 901 | * ulEfuseLogisticAlpha=13 | 
| 902 | */ | 
| 903 | |
| 904 | sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex; | 
| 905 | sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB; | 
| 906 | sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength; | 
| 907 | |
| 908 | sOutput_FuseValues.sEfuse = sInput_FuseValues; | 
| 909 | |
| 910 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 911 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue)(__builtin_offsetof(ATOM_MASTER_LIST_OF_COMMAND_TABLES, ReadEfuseValue ) / sizeof(USHORT)), | 
| 912 | (uint32_t *)&sOutput_FuseValues); | 
| 913 | |
| 914 | if (result) | 
| 915 | return result; | 
| 916 | |
| 917 | ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue)((__uint32_t)(sOutput_FuseValues.ulEfuseValue)); | 
| 918 | fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin)((__uint32_t)(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin )), 10000); | 
| 919 | fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin)((__uint32_t)(getASICProfilingInfo->ulLkgEncodeMin)), 10000); | 
| 920 | |
| 921 | fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, | 
| 922 | fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); | 
| 923 | fLkg_FT = fFT_Lkg_V0NORM; | 
| 924 | |
| 925 | /*------------------------------------------- | 
| 926 | * PART 2 - Grabbing all required values | 
| 927 | *------------------------------------------- | 
| 928 | */ | 
| 929 | fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0)((__uint32_t)(getASICProfilingInfo->ulSM_A0)), 1000000), | 
| 930 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); | 
| 931 | fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1)((__uint32_t)(getASICProfilingInfo->ulSM_A1)), 1000000), | 
| 932 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); | 
| 933 | fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2)((__uint32_t)(getASICProfilingInfo->ulSM_A2)), 100000), | 
| 934 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); | 
| 935 | fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3)((__uint32_t)(getASICProfilingInfo->ulSM_A3)), 1000000), | 
| 936 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); | 
| 937 | fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4)((__uint32_t)(getASICProfilingInfo->ulSM_A4)), 1000000), | 
| 938 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); | 
| 939 | fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5)((__uint32_t)(getASICProfilingInfo->ulSM_A5)), 1000), | 
| 940 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign))); | 
| 941 | fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6)((__uint32_t)(getASICProfilingInfo->ulSM_A6)), 1000), | 
| 942 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign))); | 
| 943 | fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7)((__uint32_t)(getASICProfilingInfo->ulSM_A7)), 1000), | 
| 944 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); | 
| 945 | |
| 946 | fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a)((__uint32_t)(getASICProfilingInfo->ulMargin_RO_a))); | 
| 947 | fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b)((__uint32_t)(getASICProfilingInfo->ulMargin_RO_b))); | 
| 948 | fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c)((__uint32_t)(getASICProfilingInfo->ulMargin_RO_c))); | 
| 949 | |
| 950 | fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed)((__uint32_t)(getASICProfilingInfo->ulMargin_fixed))); | 
| 951 | |
| 952 | fMargin_FMAX_mean = GetScaledFraction( | 
| 953 | le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean)((__uint32_t)(getASICProfilingInfo->ulMargin_Fmax_mean)), 10000); | 
| 954 | fMargin_Plat_mean = GetScaledFraction( | 
| 955 | le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean)((__uint32_t)(getASICProfilingInfo->ulMargin_plat_mean)), 10000); | 
| 956 | fMargin_FMAX_sigma = GetScaledFraction( | 
| 957 | le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma)((__uint32_t)(getASICProfilingInfo->ulMargin_Fmax_sigma)), 10000); | 
| 958 | fMargin_Plat_sigma = GetScaledFraction( | 
| 959 | le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma)((__uint32_t)(getASICProfilingInfo->ulMargin_plat_sigma)), 10000); | 
| 960 | |
| 961 | fMargin_DC_sigma = GetScaledFraction( | 
| 962 | le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma)((__uint32_t)(getASICProfilingInfo->ulMargin_DC_sigma)), 100); | 
| 963 | fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); | 
| 964 | |
| 965 | fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); | 
| 966 | fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100)); | 
| 967 | fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100)); | 
| 968 | fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100))); | 
| 969 | fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10)); | 
| 970 | |
| 971 | fSclk = GetScaledFraction(sclk, 100); | 
| 972 | |
| 973 | fV_max = fDivide(GetScaledFraction( | 
| 974 | le32_to_cpu(getASICProfilingInfo->ulMaxVddc)((__uint32_t)(getASICProfilingInfo->ulMaxVddc)), 1000), ConvertToFraction(4)); | 
| 975 | fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp)((__uint32_t)(getASICProfilingInfo->ulBoardCoreTemp)), 10); | 
| 976 | fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor)((__uint32_t)(getASICProfilingInfo->ulEvvLkgFactor)), 100); | 
| 977 | fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp)((__uint32_t)(getASICProfilingInfo->ulLeakageTemp)), 10); | 
| 978 | fV_FT = fDivide(GetScaledFraction( | 
| 979 | le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage)((__uint32_t)(getASICProfilingInfo->ulLeakageVoltage)), 1000), ConvertToFraction(4)); | 
| 980 | fV_min = fDivide(GetScaledFraction( | 
| 981 | le32_to_cpu(getASICProfilingInfo->ulMinVddc)((__uint32_t)(getASICProfilingInfo->ulMinVddc)), 1000), ConvertToFraction(4)); | 
| 982 | |
| 983 | /*----------------------- | 
| 984 | * PART 3 | 
| 985 | *----------------------- | 
| 986 | */ | 
| 987 | |
| 988 | fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); | 
| 989 | fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); | 
| 990 | fC_Term = fAdd(fMargin_RO_c, | 
| 991 | fAdd(fMultiply(fSM_A0, fLkg_FT), | 
| 992 | fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)), | 
| 993 | fAdd(fMultiply(fSM_A3, fSclk), | 
| 994 | fSubtract(fSM_A7, fRO_fused))))); | 
| 995 | |
| 996 | fVDDC_base = fSubtract(fRO_fused, | 
| 997 | fSubtract(fMargin_RO_c, | 
| 998 | fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk)))); | 
| 999 | fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2)); | 
| 1000 | |
| 1001 | repeat = fSubtract(fVDDC_base, | 
| 1002 | fDivide(fMargin_DC_sigma, ConvertToFraction(1000))); | 
| 1003 | |
| 1004 | fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a, | 
| 1005 | fGetSquare(repeat)), | 
| 1006 | fAdd(fMultiply(fMargin_RO_b, repeat), | 
| 1007 | fMargin_RO_c)); | 
| 1008 | |
| 1009 | fDC_SCLK = fSubtract(fRO_fused, | 
| 1010 | fSubtract(fRO_DC_margin, | 
| 1011 | fSubtract(fSM_A3, | 
| 1012 | fMultiply(fSM_A2, repeat)))); | 
| 1013 | fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1)); | 
| 1014 | |
| 1015 | fSigma_DC = fSubtract(fSclk, fDC_SCLK); | 
| 1016 | |
| 1017 | fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean); | 
| 1018 | fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean); | 
| 1019 | fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma); | 
| 1020 | fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma); | 
| 1021 | |
| 1022 | fSquared_Sigma_DC = fGetSquare(fSigma_DC); | 
| 1023 | fSquared_Sigma_CR = fGetSquare(fSigma_CR); | 
| 1024 | fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX); | 
| 1025 | |
| 1026 | fSclk_margin = fAdd(fMicro_FMAX, | 
| 1027 | fAdd(fMicro_CR, | 
| 1028 | fAdd(fMargin_fixed, | 
| 1029 | fSqrt(fAdd(fSquared_Sigma_FMAX, | 
| 1030 | fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR)))))); | 
| 1031 | /* | 
| 1032 | fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5; | 
| 1033 | fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6; | 
| 1034 | fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused; | 
| 1035 | */ | 
| 1036 | |
| 1037 | fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5); | 
| 1038 | fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6); | 
| 1039 | fC_Term = fAdd(fRO_DC_margin, | 
| 1040 | fAdd(fMultiply(fSM_A0, fLkg_FT), | 
| 1041 | fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT), | 
| 1042 | fAdd(fSclk, fSclk_margin)), | 
| 1043 | fAdd(fMultiply(fSM_A3, | 
| 1044 | fAdd(fSclk, fSclk_margin)), | 
| 1045 | fSubtract(fSM_A7, fRO_fused))))); | 
| 1046 | |
| 1047 | SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots); | 
| 1048 | |
| 1049 | if (GreaterThan(fRoots[0], fRoots[1])) | 
| 1050 | fEVV_V = fRoots[1]; | 
| 1051 | else | 
| 1052 | fEVV_V = fRoots[0]; | 
| 1053 | |
| 1054 | if (GreaterThan(fV_min, fEVV_V)) | 
| 1055 | fEVV_V = fV_min; | 
| 1056 | else if (GreaterThan(fEVV_V, fV_max)) | 
| 1057 | fEVV_V = fSubtract(fV_max, fStepSize); | 
| 1058 | |
| 1059 | fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0); | 
| 1060 | |
| 1061 | /*----------------- | 
| 1062 | * PART 4 | 
| 1063 | *----------------- | 
| 1064 | */ | 
| 1065 | |
| 1066 | fV_x = fV_min; | 
| 1067 | |
| 1068 | while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) { | 
| 1069 | fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd( | 
| 1070 | fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk), | 
| 1071 | fGetSquare(fV_x)), fDerateTDP); | 
| 1072 | |
| 1073 | fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor, | 
| 1074 | fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused, | 
| 1075 | fT_prod), fKv_b_fused), fV_x)), fV_x))); | 
| 1076 | fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply( | 
| 1077 | fKt_Beta_fused, fT_prod))); | 
| 1078 | fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( | 
| 1079 | fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT))); | 
| 1080 | fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( | 
| 1081 | fKt_Beta_fused, fT_FT))); | 
| 1082 | |
| 1083 | fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right); | 
| 1084 | |
| 1085 | fTDP_Current = fDivide(fTDP_Power, fV_x); | 
| 1086 | |
| 1087 | fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine), | 
| 1088 | ConvertToFraction(10))); | 
| 1089 | |
| 1090 | fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0); | 
| 1091 | |
| 1092 | if (GreaterThan(fV_max, fV_NL) && | 
| 1093 | (GreaterThan(fV_NL, fEVV_V) || | 
| 1094 | Equal(fV_NL, fEVV_V))) { | 
| 1095 | fV_NL = fMultiply(fV_NL, ConvertToFraction(1000)); | 
| 1096 | |
| 1097 | *voltage = (uint16_t)fV_NL.partial.real; | 
| 1098 | break; | 
| 1099 | } else | 
| 1100 | fV_x = fAdd(fV_x, fStepSize); | 
| 1101 | } | 
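| Summary of the loop above: fV_x sweeps from fV_min to fV_max in fStepSize increments; at each step the dynamic term (fTDP_Power_left) and the leakage term (fTDP_Power_right) are summed into fTDP_Power and converted to a current, and the load-line drop is added to form fV_NL. The first candidate that stays below fV_max and is at least fEVV_V is accepted, scaled by 1000, and its integer part is stored in *voltage. | 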
| 1102 | |
| 1103 | return result; | 
| 1104 | } | 
| 1105 | |
| 1106 | /** | 
| 1107 | * atomctrl_get_voltage_evv_on_sclk: gets voltage via call to ATOM COMMAND table. | 
| 1108 | * @hwmgr: input: pointer to hwManager | 
| 1109 | * @voltage_type: input: type of EVV voltage VDDC or VDDGFX | 
| 1110 | * @sclk: input: in 10KHz units. DPM state SCLK frequency | 
| 1111 | * which is defined in the PPTable SCLK/VDDC dependence | 
| 1112 | * table associated with this virtual_voltage_Id | 
| 1113 | * @virtual_voltage_Id: input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08 | 
| 1114 | * @voltage: output: real voltage level in unit of mv | 
| 1115 | */ | 
| 1116 | int atomctrl_get_voltage_evv_on_sclk( | 
| 1117 | struct pp_hwmgr *hwmgr, | 
| 1118 | uint8_t voltage_type, | 
| 1119 | uint32_t sclk, uint16_t virtual_voltage_Id, | 
| 1120 | uint16_t *voltage) | 
| 1121 | { | 
| 1122 | struct amdgpu_device *adev = hwmgr->adev; | 
| 1123 | GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; | 
| 1124 | int result; | 
| 1125 | |
| 1126 | get_voltage_info_param_space.ucVoltageType = | 
| 1127 | voltage_type; | 
| 1128 | get_voltage_info_param_space.ucVoltageMode = | 
| 1129 | ATOM_GET_VOLTAGE_EVV_VOLTAGE; | 
| 1130 | get_voltage_info_param_space.usVoltageLevel = | 
| 1131 | cpu_to_le16(virtual_voltage_Id); | 
| 1132 | get_voltage_info_param_space.ulSCLKFreq = | 
| 1133 | cpu_to_le32(sclk); | 
| 1134 | |
| 1135 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 1136 | GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), | 
| 1137 | (uint32_t *)&get_voltage_info_param_space); | 
| 1138 | |
| 1139 | *voltage = result ? 0 : | 
| 1140 | le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) | 
| 1141 | (&get_voltage_info_param_space))->usVoltageLevel); | 
| 1142 | |
| 1143 | return result; | 
| 1144 | } | 
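| A minimal caller sketch for atomctrl_get_voltage_evv_on_sclk(), assuming a valid struct pp_hwmgr *hwmgr; the SCLK value (60000, i.e. 600 MHz in 10 kHz units) and the virtual voltage id 0xff01 are illustrative examples only: | 
| uint16_t vddc_mv = 0; | 
| | 
| if (!atomctrl_get_voltage_evv_on_sclk(hwmgr, VOLTAGE_TYPE_VDDC, | 
| 60000, 0xff01, &vddc_mv)) | 
| pr_debug("EVV VDDC for id 0xff01: %u mV\n", vddc_mv); | 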
| 1145 | |
| 1146 | /** | 
| 1147 | * atomctrl_get_voltage_evv: gets voltage via call to ATOM COMMAND table. | 
| 1148 | * @hwmgr: input: pointer to hwManager | 
| 1149 | * @virtual_voltage_id: input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08 | 
| 1150 | * @voltage: output: real voltage level in unit of mv | 
| 1151 | */ | 
| 1152 | int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, | 
| 1153 | uint16_t virtual_voltage_id, | 
| 1154 | uint16_t *voltage) | 
| 1155 | { | 
| 1156 | struct amdgpu_device *adev = hwmgr->adev; | 
| 1157 | GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; | 
| 1158 | int result; | 
| 1159 | int entry_id; | 
| 1160 | |
| 1161 | /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */ | 
| 1162 | for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) { | 
| 1163 | if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) { | 
| 1164 | /* found */ | 
| 1165 | break; | 
| 1166 | } | 
| 1167 | } | 
| 1168 | |
| 1169 | if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) { | 
| 1170 | pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n"); | 
| 1171 | return -EINVAL; | 
| 1172 | } | 
| 1173 | |
| 1174 | get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC; | 
| 1175 | get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; | 
| 1176 | get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id; | 
| 1177 | get_voltage_info_param_space.ulSCLKFreq = | 
| 1178 | cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk); | 
| 1179 | |
| 1180 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 1181 | GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), | 
| 1182 | (uint32_t *)&get_voltage_info_param_space); | 
| 1183 | |
| 1184 | if (0 != result) | 
| 1185 | return result; | 
| 1186 | |
| 1187 | *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) | 
| 1188 | (&get_voltage_info_param_space))->usVoltageLevel); | 
| 1189 | |
| 1190 | return result; | 
| 1191 | } | 
| 1192 | |
| 1193 | /* | 
| 1194 | * Get the mpll reference clock in 10KHz | 
| 1195 | */ | 
| 1196 | uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr) | 
| 1197 | { | 
| 1198 | ATOM_COMMON_TABLE_HEADER *fw_info; | 
| 1199 | uint32_t clock; | 
| 1200 | u8 frev, crev; | 
| 1201 | u16 size; | 
| 1202 | |
| 1203 | fw_info = (ATOM_COMMON_TABLE_HEADER *) | 
| 1204 | smu_atom_get_data_table(hwmgr->adev, | 
| 1205 | GetIndexIntoMasterTable(DATA, FirmwareInfo), | 
| 1206 | &size, &frev, &crev); | 
| 1207 | |
| 1208 | if (fw_info == NULL) | 
| 1209 | clock = 2700; | 
| 1210 | else { | 
| 1211 | if ((fw_info->ucTableFormatRevision == 2) && | 
| 1212 | (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) { | 
| 1213 | ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 = | 
| 1214 | (ATOM_FIRMWARE_INFO_V2_1 *)fw_info; | 
| 1215 | clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock)); | 
| 1216 | } else { | 
| 1217 | ATOM_FIRMWARE_INFO *fwInfo_0_0 = | 
| 1218 | (ATOM_FIRMWARE_INFO *)fw_info; | 
| 1219 | clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock)); | 
| 1220 | } | 
| 1221 | } | 
| 1222 | |
| 1223 | return clock; | 
| 1224 | } | 
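| When the FirmwareInfo table cannot be found, the fallback value of 2700 corresponds to the usual 27 MHz reference crystal expressed in this function's 10 kHz units. | 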
| 1225 | |
| 1226 | /* | 
| 1227 | * Get the asic internal spread spectrum table | 
| 1228 | */ | 
| 1229 | static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device) | 
| 1230 | { | 
| 1231 | ATOM_ASIC_INTERNAL_SS_INFO *table = NULL; | 
| 1232 | u8 frev, crev; | 
| 1233 | u16 size; | 
| 1234 | |
| 1235 | table = (ATOM_ASIC_INTERNAL_SS_INFO *) | 
| 1236 | smu_atom_get_data_table(device, | 
| 1237 | GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info), | 
| 1238 | &size, &frev, &crev); | 
| 1239 | |
| 1240 | return table; | 
| 1241 | } | 
| 1242 | |
| 1243 | bool atomctrl_is_asic_internal_ss_supported(struct pp_hwmgr *hwmgr) | 
| 1244 | { | 
| 1245 | ATOM_ASIC_INTERNAL_SS_INFO *table = | 
| 1246 | asic_internal_ss_get_ss_table(hwmgr->adev); | 
| 1247 | |
| 1248 | if (table) | 
| 1249 | return true; | 
| 1250 | else | 
| 1251 | return false; | 
| 1252 | } | 
| 1253 | |
| 1254 | /* | 
| 1255 | * Get the asic internal spread spectrum assignment | 
| 1256 | */ | 
| 1257 | static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr, | 
| 1258 | const uint8_t clockSource, | 
| 1259 | const uint32_t clockSpeed, | 
| 1260 | pp_atomctrl_internal_ss_info *ssEntry) | 
| 1261 | { | 
| 1262 | ATOM_ASIC_INTERNAL_SS_INFO *table; | 
| 1263 | ATOM_ASIC_SS_ASSIGNMENT *ssInfo; | 
| 1264 | int entry_found = 0; | 
| 1265 | |
| 1266 | memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info)); | 
| 1267 | |
| 1268 | table = asic_internal_ss_get_ss_table(hwmgr->adev); | 
| 1269 | |
| 1270 | if (NULL == table) | 
| 1271 | return -1; | 
| 1272 | |
| 1273 | ssInfo = &table->asSpreadSpectrum[0]; | 
| 1274 | |
| 1275 | while (((uint8_t *)ssInfo - (uint8_t *)table) < | 
| 1276 | le16_to_cpu(table->sHeader.usStructureSize)) { | 
| 1277 | if ((clockSource == ssInfo->ucClockIndication) && | 
| 1278 | ((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) { | 
| 1279 | entry_found = 1; | 
| 1280 | break; | 
| 1281 | } | 
| 1282 | |
| 1283 | ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo + | 
| 1284 | sizeof(ATOM_ASIC_SS_ASSIGNMENT)); | 
| 1285 | } | 
| 1286 | |
| 1287 | if (entry_found) { | 
| 1288 | ssEntry->speed_spectrum_percentage = | 
| 1289 | le16_to_cpu(ssInfo->usSpreadSpectrumPercentage); | 
| 1290 | ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz); | 
| 1291 | |
| 1292 | if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) && | 
| 1293 | (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) || | 
| 1294 | (GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) { | 
| 1295 | ssEntry->speed_spectrum_rate /= 100; | 
| 1296 | } | 
| 1297 | |
| 1298 | switch (ssInfo->ucSpreadSpectrumMode) { | 
| 1299 | case 0: | 
| 1300 | ssEntry->speed_spectrum_mode = | 
| 1301 | pp_atomctrl_spread_spectrum_mode_down; | 
| 1302 | break; | 
| 1303 | case 1: | 
| 1304 | ssEntry->speed_spectrum_mode = | 
| 1305 | pp_atomctrl_spread_spectrum_mode_center; | 
| 1306 | break; | 
| 1307 | default: | 
| 1308 | ssEntry->speed_spectrum_mode = | 
| 1309 | pp_atomctrl_spread_spectrum_mode_down; | 
| 1310 | break; | 
| 1311 | } | 
| 1312 | } | 
| 1313 | |
| 1314 | return entry_found ? 0 : 1; | 
| 1315 | } | 
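| Return convention for the helper above: 0 when a matching spread-spectrum assignment was found and ssEntry filled in, 1 otherwise; for data table revisions 2.2+ and 3.x the raw usSpreadRateInKhz value is additionally divided by 100 before being reported. | 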
| 1316 | |
| 1317 | /* | 
| 1318 | * Get the memory clock spread spectrum info | 
| 1319 | */ | 
| 1320 | int atomctrl_get_memory_clock_spread_spectrum( | 
| 1321 | struct pp_hwmgr *hwmgr, | 
| 1322 | const uint32_t memory_clock, | 
| 1323 | pp_atomctrl_internal_ss_info *ssInfo) | 
| 1324 | { | 
| 1325 | return asic_internal_ss_get_ss_asignment(hwmgr, | 
| 1326 | ASIC_INTERNAL_MEMORY_SS, memory_clock, ssInfo); | 
| 1327 | } | 
| 1328 | |
| 1329 | /* | 
| 1330 | * Get the engine clock spread spectrum info | 
| 1331 | */ | 
| 1332 | int atomctrl_get_engine_clock_spread_spectrum( | 
| 1333 | struct pp_hwmgr *hwmgr, | 
| 1334 | const uint32_t engine_clock, | 
| 1335 | pp_atomctrl_internal_ss_info *ssInfo) | 
| 1336 | { | 
| 1337 | return asic_internal_ss_get_ss_asignment(hwmgr, | 
| 1338 | ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo); | 
| 1339 | } | 
| 1340 | |
| 1341 | int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, | 
| 1342 | uint16_t end_index, uint32_t *efuse) | 
| 1343 | { | 
| 1344 | struct amdgpu_device *adev = hwmgr->adev; | 
| 1345 | uint32_t mask; | 
| 1346 | int result; | 
| 1347 | READ_EFUSE_VALUE_PARAMETER efuse_param; | 
| 1348 | |
| 1349 | if ((end_index - start_index) == 31) | 
| 1350 | mask = 0xFFFFFFFF; | 
| 1351 | else | 
| 1352 | mask = (1 << ((end_index - start_index) + 1)) - 1; | 
| 1353 | |
| 1354 | efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4); | 
| 1355 | efuse_param.sEfuse.ucBitShift = (uint8_t) | 
| 1356 | (start_index - ((start_index / 32) * 32)); | 
| 1357 | efuse_param.sEfuse.ucBitLength = (uint8_t) | 
| 1358 | ((end_index - start_index) + 1); | 
| 1359 | |
| 1360 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 1361 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), | 
| 1362 | (uint32_t *)&efuse_param); | 
| 1363 | *efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask; | 
| 1364 | |
| 1365 | return result; | 
| 1366 | } | 
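| Worked example for the fuse addressing above: with start_index = 24 and end_index = 31, usEfuseIndex = (24 / 32) * 4 = 0, ucBitShift = 24, ucBitLength = 8 and mask = (1 << 8) - 1 = 0xFF, so the masked result keeps exactly the 8 requested bits of the value returned by the ReadEfuseValue command table. | 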
| 1367 | |
| 1368 | int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, | 
| 1369 | uint8_t level) | 
| 1370 | { | 
| 1371 | struct amdgpu_device *adev = hwmgr->adev; | 
| 1372 | DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters; | 
| 1373 | int result; | 
| 1374 | |
| 1375 | memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = | 
| 1376 | memory_clock & SET_CLOCK_FREQ_MASK; | 
| 1377 | memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = | 
| 1378 | ADJUST_MC_SETTING_PARAM; | 
| 1379 | memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level; | 
| 1380 | |
| 1381 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 1382 | GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), | 
| 1383 | (uint32_t *)&memory_clock_parameters); | 
| 1384 | |
| 1385 | return result; | 
| 1386 | } | 
| 1387 | |
| 1388 | int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, | 
| 1389 | uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage) | 
| 1390 | { | 
| 1391 | struct amdgpu_device *adev = hwmgr->adev; | 
| 1392 | int result; | 
| 1393 | GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space; | 
| 1394 | |
| 1395 | get_voltage_info_param_space.ucVoltageType = voltage_type; | 
| 1396 | get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; | 
| 1397 | get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id); | 
| 1398 | get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk); | 
| 1399 | |
| 1400 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 1401 | GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), | 
| 1402 | (uint32_t *)&get_voltage_info_param_space); | 
| 1403 | |
| 1404 | *voltage = result ? 0 : | 
| 1405 | le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); | 
| 1406 | |
| 1407 | return result; | 
| 1408 | } | 
| 1409 | |
| 1410 | int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table) | 
| 1411 | { | 
| 1412 | |
| 1413 | int i; | 
| 1414 | u8 frev, crev; | 
| 1415 | u16 size; | 
| 1416 | |
| 1417 | ATOM_SMU_INFO_V2_1 *psmu_info = | 
| 1418 | (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev, | 
| 1419 | GetIndexIntoMasterTable(DATA, SMU_Info), | 
| 1420 | &size, &frev, &crev); | 
| 1421 | |
| 1422 | |
| 1423 | for (i = 0; i < psmu_info->ucSclkEntryNum; i++) { | 
| 1424 | table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting; | 
| 1425 | table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv; | 
| 1426 | table->entry[i].usFcw_pcc = | 
| 1427 | le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc); | 
| 1428 | table->entry[i].usFcw_trans_upper = | 
| 1429 | le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper); | 
| 1430 | table->entry[i].usRcw_trans_lower = | 
| 1431 | le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower); | 
| 1432 | } | 
| 1433 | |
| 1434 | return 0; | 
| 1435 | } | 
| 1436 | |
| 1437 | int atomctrl_get_vddc_shared_railinfo(struct pp_hwmgr *hwmgr, uint8_t *shared_rail) | 
| 1438 | { | 
| 1439 | ATOM_SMU_INFO_V2_1 *psmu_info = | 
| 1440 | (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev, | 
| 1441 | GetIndexIntoMasterTable(DATA, SMU_Info), | 
| 1442 | NULL, NULL, NULL); | 
| 1443 | if (!psmu_info) | 
| 1444 | return -1; | 
| 1445 | |
| 1446 | *shared_rail = psmu_info->ucSharePowerSource; | 
| 1447 | |
| 1448 | return 0; | 
| 1449 | } | 
| 1450 | |
| 1451 | int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, | 
| 1452 | struct pp_atom_ctrl__avfs_parameters *param) | 
| 1453 | { | 
| 1454 | ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL; | 
| 1455 | | 
| 1456 | if (param == NULL) | 
| 1457 | return -EINVAL; | 
| 1458 | |
| 1459 | profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *) | 
| 1460 | smu_atom_get_data_table(hwmgr->adev, | 
| 1461 | GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), | 
| 1462 | NULL, NULL, NULL); | 
| 1463 | if (!profile) | 
| 1464 | return -1; | 
| 1465 | |
| 1466 | param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0); | 
| 1467 | param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1); | 
| 1468 | param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2); | 
| 1469 | param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma); | 
| 1470 | param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean); | 
| 1471 | param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma); | 
| 1472 | param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0); | 
| 1473 | param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1); | 
| 1474 | param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2); | 
| 1475 | param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0); | 
| 1476 | param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1); | 
| 1477 | param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2); | 
| 1478 | param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1); | 
| 1479 | param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2); | 
| 1480 | param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b); | 
| 1481 | param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1); | 
| 1482 | param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2); | 
| 1483 | param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b); | 
| 1484 | param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv); | 
| 1485 | param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF; | 
| 1486 | param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON; | 
| 1487 | param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF; | 
| 1488 | param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON; | 
| 1489 | param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor); | 
| 1490 | param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage; | 
| 1491 | |
| 1492 | return 0; | 
| 1493 | } | 
| 1494 | |
| 1495 | int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, | 
| 1496 | uint8_t *svd_gpio_id, uint8_t *svc_gpio_id, | 
| 1497 | uint16_t *load_line) | 
| 1498 | { | 
| 1499 | ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = | 
| 1500 | (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); | 
| 1501 | |
| 1502 | const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; | 
| 1503 | |
| 1504 | PP_ASSERT_WITH_CODE((NULL != voltage_info), | 
| 1505 | "Could not find Voltage Table in BIOS.", return -EINVAL); | 
| 1506 | |
| 1507 | voltage_object = atomctrl_lookup_voltage_type_v3 | 
| 1508 | (voltage_info, voltage_type, VOLTAGE_OBJ_SVID2); | 
| 1509 | |
| 1510 | *svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId; | 
| 1511 | *svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId; | 
| 1512 | *load_line = voltage_object->asSVID2Obj.usLoadLine_PSI; | 
| 1513 | |
| 1514 | return 0; | 
| 1515 | } | 
| 1516 | |
| 1517 | int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id) | 
| 1518 | { | 
| 1519 | struct amdgpu_device *adev = hwmgr->adev; | 
| 1520 | SET_VOLTAGE_PS_ALLOCATION allocation; | 
| 1521 | SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters = | 
| 1522 | (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage; | 
| 1523 | int result; | 
| 1524 | |
| 1525 | voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID; | 
| 1526 | |
| 1527 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, | 
| 1528 | GetIndexIntoMasterTable(COMMAND, SetVoltage), | 
| 1529 | (uint32_t *)voltage_parameters); | 
| 1530 | |
| 1531 | *virtual_voltage_id = voltage_parameters->usVoltageLevel; | 
| 1532 | |
| 1533 | return result; | 
| 1534 | } | 
| 1535 | |
| 1536 | int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, | 
| 1537 | uint16_t *vddc, uint16_t *vddci, | 
| 1538 | uint16_t virtual_voltage_id, | 
| 1539 | uint16_t efuse_voltage_id) | 
| 1540 | { | 
| 1541 | int i, j; | 
| 1542 | int ix; | 
| 1543 | u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf; | 
| 1544 | ATOM_ASIC_PROFILING_INFO_V2_1 *profile; | 
| 1545 | |
| 1546 | *vddc = 0; | 
| 1547 | *vddci = 0; | 
| 1548 | |
| 1549 | ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); | 
| 1550 | |
| 1551 | profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) | 
| 1552 | smu_atom_get_data_table(hwmgr->adev, | 
| 1553 | ix, | 
| 1554 | NULL, NULL, NULL); | 
| 1555 | if (!profile) | 
| 1556 | return -EINVAL; | 
| 1557 | |
| 1558 | if ((profile->asHeader.ucTableFormatRevision >= 2) && | 
| 1559 | (profile->asHeader.ucTableContentRevision >= 1) && | 
| 1560 | (profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) { | 
| 1561 | leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset); | 
| 1562 | vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset); | 
| 1563 | vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset); | 
| 1564 | if (profile->ucElbVDDC_Num > 0) { | 
| 1565 | for (i = 0; i < profile->ucElbVDDC_Num; i++) { | 
| 1566 | if (vddc_id_buf[i] == virtual_voltage_id) { | 
| 1567 | for (j = 0; j < profile->ucLeakageBinNum; j++) { | 
| 1568 | if (efuse_voltage_id <= leakage_bin[j]) { | 
| 1569 | *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i]; | 
| 1570 | break; | 
| 1571 | } | 
| 1572 | } | 
| 1573 | break; | 
| 1574 | } | 
| 1575 | } | 
| 1576 | } | 
| 1577 | |
| 1578 | vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset); | 
| 1579 | vddci_buf = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset); | 
| 1580 | if (profile->ucElbVDDCI_Num > 0) { | 
| 1581 | for (i = 0; i < profile->ucElbVDDCI_Num; i++) { | 
| 1582 | if (vddci_id_buf[i] == virtual_voltage_id) { | 
| 1583 | for (j = 0; j < profile->ucLeakageBinNum; j++) { | 
| 1584 | if (efuse_voltage_id <= leakage_bin[j]) { | 
| 1585 | *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i]; | 
| 1586 | break; | 
| 1587 | } | 
| 1588 | } | 
| 1589 | break; | 
| 1590 | } | 
| 1591 | } | 
| 1592 | } | 
| 1593 | } | 
| 1594 | |
| 1595 | return 0; | 
| 1596 | } | 
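| The lookups above treat the ELB level arrays as row-major tables with ucLeakageBinNum rows and ucElbVDDC_Num (or ucElbVDDCI_Num) columns: column i is selected by matching virtual_voltage_id in the id array, row j by the first leakage bin whose bound is >= efuse_voltage_id, hence the vddc_buf[j * profile->ucElbVDDC_Num + i] indexing. | 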
| 1597 | |
| 1598 | void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc, | 
| 1599 | uint32_t *min_vddc) | 
| 1600 | { | 
| 1601 | void *profile; | 
| 1602 | |
| 1603 | profile = smu_atom_get_data_table(hwmgr->adev, | 
| 1604 | GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), | 
| 1605 | NULL, NULL, NULL); | 
| 1606 | |
| 1607 | if (profile) { | 
| 1608 | switch (hwmgr->chip_id) { | 
| 1609 | case CHIP_TONGA: | 
| 1610 | case CHIP_FIJI: | 
| 1611 | *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4; | 
| 1612 | *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4; | 
| 1613 | return; | 
| 1614 | case CHIP_POLARIS11: | 
| 1615 | case CHIP_POLARIS10: | 
| 1616 | case CHIP_POLARIS12: | 
| 1617 | *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100; | 
| 1618 | *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100; | 
| 1619 | return; | 
| 1620 | default: | 
| 1621 | break; | 
| 1622 | } | 
| 1623 | } | 
| 1624 | *max_vddc = 0; | 
| 1625 | *min_vddc = 0; | 
| 1626 | } | 
| 1627 | |
| 1628 | int atomctrl_get_edc_hilo_leakage_offset_table(struct pp_hwmgr *hwmgr, | 
| 1629 | AtomCtrl_HiLoLeakageOffsetTable *table) | 
| 1630 | { | 
| 1631 | ATOM_GFX_INFO_V2_3 *gfxinfo = smu_atom_get_data_table(hwmgr->adev, | 
| 1632 | GetIndexIntoMasterTable(DATA, GFX_Info), | 
| 1633 | NULL, NULL, NULL); | 
| 1634 | if (!gfxinfo) | 
| 1635 | return -ENOENT; | 
| 1636 | |
| 1637 | table->usHiLoLeakageThreshold = gfxinfo->usHiLoLeakageThreshold; | 
| 1638 | table->usEdcDidtLoDpm7TableOffset = gfxinfo->usEdcDidtLoDpm7TableOffset; | 
| 1639 | table->usEdcDidtHiDpm7TableOffset = gfxinfo->usEdcDidtHiDpm7TableOffset; | 
| 1640 | |
| 1641 | return 0; | 
| 1642 | } | 
| 1643 | |
| 1644 | static AtomCtrl_EDCLeakgeTable *get_edc_leakage_table(struct pp_hwmgr *hwmgr, | 
| 1645 | uint16_t offset) | 
| 1646 | { | 
| 1647 | void *table_address; | 
| 1648 | char *temp; | 
| 1649 | |
| 1650 | table_address = smu_atom_get_data_table(hwmgr->adev, | 
| 1651 | GetIndexIntoMasterTable(DATA, GFX_Info), | 
| 1652 | NULL, NULL, NULL); | 
| 1653 | if (!table_address) | 
| 1654 | return NULL; | 
| 1655 | |
| 1656 | temp = (char *)table_address; | 
| 1657 | table_address += offset; | 
| Value stored to 'table_address' is never read | |
| 1658 | |
| 1659 | return (AtomCtrl_EDCLeakgeTable *)temp; | 
| 1660 | } | 
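| A sketch of one way to resolve the dead store flagged above, assuming the intent is to return the table that sits offset bytes into the GFX_Info data table (an assumption, not the confirmed upstream fix): | 
| static AtomCtrl_EDCLeakgeTable *get_edc_leakage_table(struct pp_hwmgr *hwmgr, | 
| uint16_t offset) | 
| { | 
| char *table_address; | 
| | 
| table_address = (char *)smu_atom_get_data_table(hwmgr->adev, | 
| GetIndexIntoMasterTable(DATA, GFX_Info), | 
| NULL, NULL, NULL); | 
| if (!table_address) | 
| return NULL; | 
| | 
| /* apply the requested offset before returning, so the store is actually used */ | 
| return (AtomCtrl_EDCLeakgeTable *)(table_address + offset); | 
| } | 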
| 1661 | |
| 1662 | int atomctrl_get_edc_leakage_table(struct pp_hwmgr *hwmgr, | 
| 1663 | AtomCtrl_EDCLeakgeTable *table, | 
| 1664 | uint16_t offset) | 
| 1665 | { | 
| 1666 | uint32_t length, i; | 
| 1667 | AtomCtrl_EDCLeakgeTable *leakage_table = | 
| 1668 | get_edc_leakage_table(hwmgr, offset); | 
| 1669 | |
| 1670 | if (!leakage_table) | 
| 1671 | return -ENOENT; | 
| 1672 | |
| 1673 | length = sizeof(leakage_table->DIDT_REG) / | 
| 1674 | sizeof(leakage_table->DIDT_REG[0]); | 
| 1675 | for (i = 0; i < length; i++) | 
| 1676 | table->DIDT_REG[i] = leakage_table->DIDT_REG[i]; | 
| 1677 | |
| 1678 | return 0; | 
| 1679 | } |