File: | dev/pci/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c |
Warning: | line 717, column 3 | Value stored to 'fPowerDPMx' is never read |
1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | #include "pp_debug.h" |
24 | #include <linux/module.h> |
25 | #include <linux/slab.h> |
26 | #include <linux/delay.h> |
27 | #include "atom.h" |
28 | #include "ppatomctrl.h" |
29 | #include "atombios.h" |
30 | #include "cgs_common.h" |
31 | #include "ppevvmath.h" |
32 | |
33 | #define MEM_ID_MASK 0xff000000 |
34 | #define MEM_ID_SHIFT 24 |
35 | #define CLOCK_RANGE_MASK 0x00ffffff |
36 | #define CLOCK_RANGE_SHIFT 0 |
37 | #define LOW_NIBBLE_MASK 0xf |
38 | #define DATA_EQU_PREV 0 |
39 | #define DATA_FROM_TABLE 4 |
40 | |
41 | union voltage_object_info { |
42 | struct _ATOM_VOLTAGE_OBJECT_INFO v1; |
43 | struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2; |
44 | struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3; |
45 | }; |
46 | |
47 | static int atomctrl_retrieve_ac_timing( |
48 | uint8_t index, |
49 | ATOM_INIT_REG_BLOCK *reg_block, |
50 | pp_atomctrl_mc_reg_table *table) |
51 | { |
52 | uint32_t i, j; |
53 | uint8_t tmem_id; |
54 | ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *) |
55 | ((uint8_t *)reg_block + (2 * sizeof(uint16_t)) + le16_to_cpu(reg_block->usRegIndexTblSize)); |
56 | |
57 | uint8_t num_ranges = 0; |
58 | |
59 | while (*(uint32_t *)reg_data != END_OF_REG_DATA_BLOCK && |
60 | num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES) { |
61 | tmem_id = (uint8_t)((*(uint32_t *)reg_data & MEM_ID_MASK) >> MEM_ID_SHIFT); |
62 | |
63 | if (index == tmem_id) { |
64 | table->mc_reg_table_entry[num_ranges].mclk_max = |
65 | (uint32_t)((*(uint32_t *)reg_data & CLOCK_RANGE_MASK) >> |
66 | CLOCK_RANGE_SHIFT); |
67 | |
68 | for (i = 0, j = 1; i < table->last; i++) { |
69 | if ((table->mc_reg_address[i].uc_pre_reg_data & |
70 | LOW_NIBBLE_MASK) == DATA_FROM_TABLE) { |
71 | table->mc_reg_table_entry[num_ranges].mc_data[i] = |
72 | (uint32_t)*((uint32_t *)reg_data + j); |
73 | j++; |
74 | } else if ((table->mc_reg_address[i].uc_pre_reg_data & |
75 | LOW_NIBBLE_MASK) == DATA_EQU_PREV) { |
76 | table->mc_reg_table_entry[num_ranges].mc_data[i] = |
77 | table->mc_reg_table_entry[num_ranges].mc_data[i-1]; |
78 | } |
79 | } |
80 | num_ranges++; |
81 | } |
82 | |
83 | reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *) |
84 | ((uint8_t *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize)); |
85 | } |
86 | |
87 | PP_ASSERT_WITH_CODE((*(uint32_t *)reg_data == END_OF_REG_DATA_BLOCK), |
88 | "Invalid VramInfo table.", return -1); |
89 | table->num_entries = num_ranges; |
90 | |
91 | return 0; |
92 | } |
93 | |
94 | /** |
95 | * Get memory clock AC timing registers index from VBIOS table. |
96 | * VBIOS marks the end of the memory clock AC timing registers with ucPreRegDataLength bit6 = 1. |
97 | * @param reg_block the address of the ATOM_INIT_REG_BLOCK |
98 | * @param table the address of MCRegTable |
99 | * @return 0 |
100 | */ |
101 | static int atomctrl_set_mc_reg_address_table( |
102 | ATOM_INIT_REG_BLOCK *reg_block, |
103 | pp_atomctrl_mc_reg_table *table) |
104 | { |
105 | uint8_t i = 0; |
106 | uint8_t num_entries = (uint8_t)((le16_to_cpu(reg_block->usRegIndexTblSize)) |
107 | / sizeof(ATOM_INIT_REG_INDEX_FORMAT)); |
108 | ATOM_INIT_REG_INDEX_FORMAT *format = ®_block->asRegIndexBuf[0]; |
109 | |
110 | num_entries--; /* subtract 1 data end mark entry */ |
111 | |
112 | PP_ASSERT_WITH_CODE((num_entries <= VBIOS_MC_REGISTER_ARRAY_SIZE), |
113 | "Invalid VramInfo table.", return -1); |
114 | |
115 | /* ucPreRegDataLength bit6 = 1 is the end of memory clock AC timing registers */ |
116 | while ((!(format->ucPreRegDataLength & ACCESS_PLACEHOLDER)) && |
117 | (i < num_entries)) { |
118 | table->mc_reg_address[i].s1 = |
119 | (uint16_t)(le16_to_cpu(format->usRegIndex)); |
120 | table->mc_reg_address[i].uc_pre_reg_data = |
121 | format->ucPreRegDataLength; |
122 | |
123 | i++; |
124 | format = (ATOM_INIT_REG_INDEX_FORMAT *) |
125 | ((uint8_t *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT)); |
126 | } |
127 | |
128 | table->last = i; |
129 | return 0; |
130 | } |
131 | |
132 | int atomctrl_initialize_mc_reg_table( |
133 | struct pp_hwmgr *hwmgr, |
134 | uint8_t module_index, |
135 | pp_atomctrl_mc_reg_table *table) |
136 | { |
137 | ATOM_VRAM_INFO_HEADER_V2_1 *vram_info; |
138 | ATOM_INIT_REG_BLOCK *reg_block; |
139 | int result = 0; |
140 | u8 frev, crev; |
141 | u16 size; |
142 | |
143 | vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *) |
144 | smu_atom_get_data_table(hwmgr->adev, |
145 | GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); |
146 | |
147 | if (module_index >= vram_info->ucNumOfVRAMModule) { |
148 | pr_err("Invalid VramInfo table.")printk("\0013" "amdgpu: " "Invalid VramInfo table."); |
149 | result = -1; |
150 | } else if (vram_info->sHeader.ucTableFormatRevision < 2) { |
151 | pr_err("Invalid VramInfo table.")printk("\0013" "amdgpu: " "Invalid VramInfo table."); |
152 | result = -1; |
153 | } |
154 | |
155 | if (0 == result) { |
156 | reg_block = (ATOM_INIT_REG_BLOCK *) |
157 | ((uint8_t *)vram_info + le16_to_cpu(vram_info->usMemClkPatchTblOffset)); |
158 | result = atomctrl_set_mc_reg_address_table(reg_block, table); |
159 | } |
160 | |
161 | if (0 == result) { |
162 | result = atomctrl_retrieve_ac_timing(module_index, |
163 | reg_block, table); |
164 | } |
165 | |
166 | return result; |
167 | } |
168 | |
169 | /** |
170 | * Set DRAM timings based on engine clock and memory clock. |
171 | */ |
172 | int atomctrl_set_engine_dram_timings_rv770( |
173 | struct pp_hwmgr *hwmgr, |
174 | uint32_t engine_clock, |
175 | uint32_t memory_clock) |
176 | { |
177 | struct amdgpu_device *adev = hwmgr->adev; |
178 | |
179 | SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters; |
180 | |
181 | /* They are both in 10KHz Units. */ |
182 | engine_clock_parameters.ulTargetEngineClock = |
183 | cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) | |
184 | ((COMPUTE_ENGINE_PLL_PARAM << 24))); |
185 | |
186 | /* in 10 khz units.*/ |
187 | engine_clock_parameters.sReserved.ulClock = |
188 | cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); |
189 | |
190 | return amdgpu_atom_execute_table(adev->mode_info.atom_context, |
191 | GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), |
192 | (uint32_t *)&engine_clock_parameters); |
193 | } |
194 | |
195 | /** |
196 | * Private function to get the voltage object info table address. |
197 | * WARNING: The table returned by this function is in |
198 | * dynamically allocated memory. |
199 | * The caller has to release it by calling kfree. |
200 | */ |
201 | static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device) |
202 | { |
203 | int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo); |
204 | u8 frev, crev; |
205 | u16 size; |
206 | union voltage_object_info *voltage_info; |
207 | |
208 | voltage_info = (union voltage_object_info *) |
209 | smu_atom_get_data_table(device, index, |
210 | &size, &frev, &crev); |
211 | |
212 | if (voltage_info != NULL) |
213 | return (ATOM_VOLTAGE_OBJECT_INFO *) &(voltage_info->v3); |
214 | else |
215 | return NULL; |
216 | } |
217 | |
218 | static const ATOM_VOLTAGE_OBJECT_V3 *atomctrl_lookup_voltage_type_v3( |
219 | const ATOM_VOLTAGE_OBJECT_INFO_V3_1 * voltage_object_info_table, |
220 | uint8_t voltage_type, uint8_t voltage_mode) |
221 | { |
222 | unsigned int size = le16_to_cpu(voltage_object_info_table->sHeader.usStructureSize); |
223 | unsigned int offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]); |
224 | uint8_t *start = (uint8_t *)voltage_object_info_table; |
225 | |
226 | while (offset < size) { |
227 | const ATOM_VOLTAGE_OBJECT_V3 *voltage_object = |
228 | (const ATOM_VOLTAGE_OBJECT_V3 *)(start + offset); |
229 | |
230 | if (voltage_type == voltage_object->asGpioVoltageObj.sHeader.ucVoltageType && |
231 | voltage_mode == voltage_object->asGpioVoltageObj.sHeader.ucVoltageMode) |
232 | return voltage_object; |
233 | |
234 | offset += le16_to_cpu(voltage_object->asGpioVoltageObj.sHeader.usSize); |
235 | } |
236 | |
237 | return NULL; |
238 | } |
239 | |
240 | /** atomctrl_get_memory_pll_dividers_si(). |
241 | * |
242 | * @param hwmgr input parameter: pointer to HwMgr |
243 | * @param clock_value input parameter: memory clock |
244 | * @param dividers output parameter: memory PLL dividers |
245 | * @param strobe_mode input parameter: 1 for strobe mode, 0 for performance mode |
246 | */ |
247 | int atomctrl_get_memory_pll_dividers_si( |
248 | struct pp_hwmgr *hwmgr, |
249 | uint32_t clock_value, |
250 | pp_atomctrl_memory_clock_param *mpll_param, |
251 | bool strobe_mode) |
252 | { |
253 | struct amdgpu_device *adev = hwmgr->adev; |
254 | COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; |
255 | int result; |
256 | |
257 | mpll_parameters.ulClock = cpu_to_le32(clock_value); |
258 | mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0); |
259 | |
260 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
261 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), |
262 | (uint32_t *)&mpll_parameters); |
263 | |
264 | if (0 == result) { |
265 | mpll_param->mpll_fb_divider.clk_frac = |
266 | le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac); |
267 | mpll_param->mpll_fb_divider.cl_kf = |
268 | le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv); |
269 | mpll_param->mpll_post_divider = |
270 | (uint32_t)mpll_parameters.ucPostDiv; |
271 | mpll_param->vco_mode = |
272 | (uint32_t)(mpll_parameters.ucPllCntlFlag & |
273 | MPLL_CNTL_FLAG_VCO_MODE_MASK); |
274 | mpll_param->yclk_sel = |
275 | (uint32_t)((mpll_parameters.ucPllCntlFlag & |
276 | MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0); |
277 | mpll_param->qdr = |
278 | (uint32_t)((mpll_parameters.ucPllCntlFlag & |
279 | MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0); |
280 | mpll_param->half_rate = |
281 | (uint32_t)((mpll_parameters.ucPllCntlFlag & |
282 | MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0); |
283 | mpll_param->dll_speed = |
284 | (uint32_t)(mpll_parameters.ucDllSpeed); |
285 | mpll_param->bw_ctrl = |
286 | (uint32_t)(mpll_parameters.ucBWCntl); |
287 | } |
288 | |
289 | return result; |
290 | } |
291 | |
292 | /** atomctrl_get_memory_pll_dividers_vi(). |
293 | * |
294 | * @param hwmgr input parameter: pointer to HwMgr |
295 | * @param clock_value input parameter: memory clock |
296 | * @param dividers output parameter: memory PLL dividers |
297 | */ |
298 | int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, |
299 | uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param) |
300 | { |
301 | struct amdgpu_device *adev = hwmgr->adev; |
302 | COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; |
303 | int result; |
304 | |
305 | mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); |
306 | |
307 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
308 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), |
309 | (uint32_t *)&mpll_parameters); |
310 | |
311 | if (!result) |
312 | mpll_param->mpll_post_divider = |
313 | (uint32_t)mpll_parameters.ulClock.ucPostDiv; |
314 | |
315 | return result; |
316 | } |
317 | |
318 | int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr, |
319 | uint32_t clock_value, |
320 | pp_atomctrl_memory_clock_param_ai *mpll_param) |
321 | { |
322 | struct amdgpu_device *adev = hwmgr->adev; |
323 | COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0}; |
324 | int result; |
325 | |
326 | mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); |
327 | |
328 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
329 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), |
330 | (uint32_t *)&mpll_parameters); |
331 | |
332 | /* VEGAM's MPLL takes some time to finish computing */ |
333 | udelay(10); |
334 | |
335 | if (!result) { |
336 | mpll_param->ulMclk_fcw_int = |
337 | le16_to_cpu(mpll_parameters.usMclk_fcw_int); |
338 | mpll_param->ulMclk_fcw_frac = |
339 | le16_to_cpu(mpll_parameters.usMclk_fcw_frac); |
340 | mpll_param->ulClock = |
341 | le32_to_cpu(mpll_parameters.ulClock.ulClock); |
342 | mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv; |
343 | } |
344 | |
345 | return result; |
346 | } |
347 | |
348 | int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, |
349 | uint32_t clock_value, |
350 | pp_atomctrl_clock_dividers_kong *dividers) |
351 | { |
352 | struct amdgpu_device *adev = hwmgr->adev; |
353 | COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; |
354 | int result; |
355 | |
356 | pll_parameters.ulClock = cpu_to_le32(clock_value); |
357 | |
358 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
359 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), |
360 | (uint32_t *)&pll_parameters); |
361 | |
362 | if (0 == result) { |
363 | dividers->pll_post_divider = pll_parameters.ucPostDiv; |
364 | dividers->real_clock = le32_to_cpu(pll_parameters.ulClock); |
365 | } |
366 | |
367 | return result; |
368 | } |
369 | |
370 | int atomctrl_get_engine_pll_dividers_vi( |
371 | struct pp_hwmgr *hwmgr, |
372 | uint32_t clock_value, |
373 | pp_atomctrl_clock_dividers_vi *dividers) |
374 | { |
375 | struct amdgpu_device *adev = hwmgr->adev; |
376 | COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; |
377 | int result; |
378 | |
379 | pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); |
380 | pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; |
381 | |
382 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
383 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), |
384 | (uint32_t *)&pll_patameters); |
385 | |
386 | if (0 == result) { |
387 | dividers->pll_post_divider = |
388 | pll_patameters.ulClock.ucPostDiv; |
389 | dividers->real_clock = |
390 | le32_to_cpu(pll_patameters.ulClock.ulClock); |
391 | |
392 | dividers->ul_fb_div.ul_fb_div_frac = |
393 | le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); |
394 | dividers->ul_fb_div.ul_fb_div = |
395 | le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); |
396 | |
397 | dividers->uc_pll_ref_div = |
398 | pll_patameters.ucPllRefDiv; |
399 | dividers->uc_pll_post_div = |
400 | pll_patameters.ucPllPostDiv; |
401 | dividers->uc_pll_cntl_flag = |
402 | pll_patameters.ucPllCntlFlag; |
403 | } |
404 | |
405 | return result; |
406 | } |
407 | |
408 | int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, |
409 | uint32_t clock_value, |
410 | pp_atomctrl_clock_dividers_ai *dividers) |
411 | { |
412 | struct amdgpu_device *adev = hwmgr->adev; |
413 | COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters; |
414 | int result; |
415 | |
416 | pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); |
417 | pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; |
418 | |
419 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
420 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), |
421 | (uint32_t *)&pll_patameters); |
422 | |
423 | if (0 == result) { |
424 | dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac); |
425 | dividers->usSclk_fcw_int = le16_to_cpu(pll_patameters.usSclk_fcw_int); |
426 | dividers->ucSclkPostDiv = pll_patameters.ucSclkPostDiv; |
427 | dividers->ucSclkVcoMode = pll_patameters.ucSclkVcoMode; |
428 | dividers->ucSclkPllRange = pll_patameters.ucSclkPllRange; |
429 | dividers->ucSscEnable = pll_patameters.ucSscEnable; |
430 | dividers->usSsc_fcw1_frac = le16_to_cpu(pll_patameters.usSsc_fcw1_frac); |
431 | dividers->usSsc_fcw1_int = le16_to_cpu(pll_patameters.usSsc_fcw1_int); |
432 | dividers->usPcc_fcw_int = le16_to_cpu(pll_patameters.usPcc_fcw_int); |
433 | dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_patameters.usSsc_fcw_slew_frac); |
434 | dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_patameters.usPcc_fcw_slew_frac); |
435 | } |
436 | return result; |
437 | } |
438 | |
439 | int atomctrl_get_dfs_pll_dividers_vi( |
440 | struct pp_hwmgr *hwmgr, |
441 | uint32_t clock_value, |
442 | pp_atomctrl_clock_dividers_vi *dividers) |
443 | { |
444 | struct amdgpu_device *adev = hwmgr->adev; |
445 | COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; |
446 | int result; |
447 | |
448 | pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); |
449 | pll_patameters.ulClock.ucPostDiv = |
450 | COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; |
451 | |
452 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
453 | GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), |
454 | (uint32_t *)&pll_patameters); |
455 | |
456 | if (0 == result) { |
457 | dividers->pll_post_divider = |
458 | pll_patameters.ulClock.ucPostDiv; |
459 | dividers->real_clock = |
460 | le32_to_cpu(pll_patameters.ulClock.ulClock); |
461 | |
462 | dividers->ul_fb_div.ul_fb_div_frac = |
463 | le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac); |
464 | dividers->ul_fb_div.ul_fb_div = |
465 | le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv); |
466 | |
467 | dividers->uc_pll_ref_div = |
468 | pll_patameters.ucPllRefDiv; |
469 | dividers->uc_pll_post_div = |
470 | pll_patameters.ucPllPostDiv; |
471 | dividers->uc_pll_cntl_flag = |
472 | pll_patameters.ucPllCntlFlag; |
473 | } |
474 | |
475 | return result; |
476 | } |
477 | |
478 | /** |
479 | * Get the reference clock in 10KHz |
480 | */ |
481 | uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr) |
482 | { |
483 | ATOM_FIRMWARE_INFO *fw_info; |
484 | u8 frev, crev; |
485 | u16 size; |
486 | uint32_t clock; |
487 | |
488 | fw_info = (ATOM_FIRMWARE_INFO *) |
489 | smu_atom_get_data_table(hwmgr->adev, |
490 | GetIndexIntoMasterTable(DATA, FirmwareInfo), |
491 | &size, &frev, &crev); |
492 | |
493 | if (fw_info == NULL) |
494 | clock = 2700; |
495 | else |
496 | clock = (uint32_t)(le16_to_cpu(fw_info->usReferenceClock)); |
497 | |
498 | return clock; |
499 | } |
500 | |
501 | /** |
502 | * Returns true if the given voltage type is controlled by GPIO pins. |
503 | * voltage_type is one of SET_VOLTAGE_TYPE_ASIC_VDDC, |
504 | * SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ. |
505 | * voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE |
506 | */ |
507 | bool atomctrl_is_voltage_controlled_by_gpio_v3( |
508 | struct pp_hwmgr *hwmgr, |
509 | uint8_t voltage_type, |
510 | uint8_t voltage_mode) |
511 | { |
512 | ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = |
513 | (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); |
514 | bool ret; |
515 | |
516 | PP_ASSERT_WITH_CODE((NULL != voltage_info), |
517 | "Could not find Voltage Table in BIOS.", return false;); |
518 | |
519 | ret = (NULL != atomctrl_lookup_voltage_type_v3 |
520 | (voltage_info, voltage_type, voltage_mode)) ? true : false; |
521 | |
522 | return ret; |
523 | } |
524 | |
525 | int atomctrl_get_voltage_table_v3( |
526 | struct pp_hwmgr *hwmgr, |
527 | uint8_t voltage_type, |
528 | uint8_t voltage_mode, |
529 | pp_atomctrl_voltage_table *voltage_table) |
530 | { |
531 | ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = |
532 | (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); |
533 | const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; |
534 | unsigned int i; |
535 | |
536 | PP_ASSERT_WITH_CODE((NULL != voltage_info), |
537 | "Could not find Voltage Table in BIOS.", return -1;); |
538 | |
539 | voltage_object = atomctrl_lookup_voltage_type_v3 |
540 | (voltage_info, voltage_type, voltage_mode); |
541 | |
542 | if (voltage_object == NULL) |
543 | return -1; |
544 | |
545 | PP_ASSERT_WITH_CODE( |
546 | (voltage_object->asGpioVoltageObj.ucGpioEntryNum <= |
547 | PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES), |
548 | "Too many voltage entries!", |
549 | return -1; |
550 | ); |
551 | |
552 | for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) { |
553 | voltage_table->entries[i].value = |
554 | le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue); |
555 | voltage_table->entries[i].smio_low = |
556 | le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId); |
557 | } |
558 | |
559 | voltage_table->mask_low = |
560 | le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal); |
561 | voltage_table->count = |
562 | voltage_object->asGpioVoltageObj.ucGpioEntryNum; |
563 | voltage_table->phase_delay = |
564 | voltage_object->asGpioVoltageObj.ucPhaseDelay; |
565 | |
566 | return 0; |
567 | } |
568 | |
569 | static bool atomctrl_lookup_gpio_pin( |
570 | ATOM_GPIO_PIN_LUT *gpio_lookup_table, |
571 | const uint32_t pinId, |
572 | pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) |
573 | { |
574 | unsigned int size = le16_to_cpu(gpio_lookup_table->sHeader.usStructureSize); |
575 | unsigned int offset = offsetof(ATOM_GPIO_PIN_LUT, asGPIO_Pin[0]); |
576 | uint8_t *start = (uint8_t *)gpio_lookup_table; |
577 | |
578 | while (offset < size) { |
579 | const ATOM_GPIO_PIN_ASSIGNMENT *pin_assignment = |
580 | (const ATOM_GPIO_PIN_ASSIGNMENT *)(start + offset); |
581 | |
582 | if (pinId == pin_assignment->ucGPIO_ID) { |
583 | gpio_pin_assignment->uc_gpio_pin_bit_shift = |
584 | pin_assignment->ucGpioPinBitShift; |
585 | gpio_pin_assignment->us_gpio_pin_aindex = |
586 | le16_to_cpu(pin_assignment->usGpioPin_AIndex); |
587 | return true; |
588 | } |
589 | |
590 | offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1; |
591 | } |
592 | |
593 | return false; |
594 | } |
595 | |
596 | /** |
597 | * Private function to get the GPIO pin lookup table address. |
598 | * WARNING: The table returned by this function is in |
599 | * dynamically allocated memory. |
600 | * The caller has to release it by calling kfree. |
601 | */ |
602 | static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device) |
603 | { |
604 | u8 frev, crev; |
605 | u16 size; |
606 | void *table_address; |
607 | |
608 | table_address = (ATOM_GPIO_PIN_LUT *) |
609 | smu_atom_get_data_table(device, |
610 | GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT), |
611 | &size, &frev, &crev); |
612 | |
613 | PP_ASSERT_WITH_CODE((NULL != table_address), |
614 | "Error retrieving BIOS Table Address!", return NULL;); |
615 | |
616 | return (ATOM_GPIO_PIN_LUT *)table_address; |
617 | } |
618 | |
619 | /** |
620 | * Returns true if the given pin id is found in the lookup table. |
621 | */ |
622 | bool atomctrl_get_pp_assign_pin( |
623 | struct pp_hwmgr *hwmgr, |
624 | const uint32_t pinId, |
625 | pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) |
626 | { |
627 | bool bRet = false; |
628 | ATOM_GPIO_PIN_LUT *gpio_lookup_table = |
629 | get_gpio_lookup_table(hwmgr->adev); |
630 | |
631 | PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), |
632 | "Could not find GPIO lookup Table in BIOS.", return false); |
633 | |
634 | bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId, |
635 | gpio_pin_assignment); |
636 | |
637 | return bRet; |
638 | } |
639 | |
640 | int atomctrl_calculate_voltage_evv_on_sclk( |
641 | struct pp_hwmgr *hwmgr, |
642 | uint8_t voltage_type, |
643 | uint32_t sclk, |
644 | uint16_t virtual_voltage_Id, |
645 | uint16_t *voltage, |
646 | uint16_t dpm_level, |
647 | bool debug) |
648 | { |
649 | ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo; |
650 | struct amdgpu_device *adev = hwmgr->adev; |
651 | EFUSE_LINEAR_FUNC_PARAM sRO_fuse; |
652 | EFUSE_LINEAR_FUNC_PARAM sCACm_fuse; |
653 | EFUSE_LINEAR_FUNC_PARAM sCACb_fuse; |
654 | EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse; |
655 | EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse; |
656 | EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse; |
657 | EFUSE_INPUT_PARAMETER sInput_FuseValues; |
658 | READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues; |
659 | |
660 | uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused; |
661 | fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7; |
662 | fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma; |
663 | fInt fLkg_FT, repeat; |
664 | fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX; |
665 | fInt fRLL_LoadLine, fPowerDPMx, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin; |
666 | fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM; |
667 | fInt fSclk_margin, fSclk, fEVV_V; |
668 | fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL; |
669 | uint32_t ul_FT_Lkg_V0NORM; |
670 | fInt fLn_MaxDivMin, fMin, fAverage, fRange; |
671 | fInt fRoots[2]; |
672 | fInt fStepSize = GetScaledFraction(625, 100000); |
673 | |
674 | int result; |
675 | |
676 | getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *) |
677 | smu_atom_get_data_table(hwmgr->adev, |
678 | GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), |
679 | NULL, NULL, NULL); |
680 | |
681 | if (!getASICProfilingInfo) |
682 | return -1; |
683 | |
684 | if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || |
685 | (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && |
686 | getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) |
687 | return -1; |
688 | |
689 | /*----------------------------------------------------------- |
690 | *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL |
691 | *----------------------------------------------------------- |
692 | */ |
693 | fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000); |
694 | |
695 | switch (dpm_level) { |
696 | case 1: |
697 | fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1)); |
698 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000); |
699 | break; |
700 | case 2: |
701 | fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2)); |
702 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000); |
703 | break; |
704 | case 3: |
705 | fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3)); |
706 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000); |
707 | break; |
708 | case 4: |
709 | fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4)); |
710 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000); |
711 | break; |
712 | case 5: |
713 | fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5)); |
714 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000); |
715 | break; |
716 | case 6: |
717 | fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6)); |
Value stored to 'fPowerDPMx' is never read |
718 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000); |
719 | break; |
720 | case 7: |
721 | fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7)); |
722 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000); |
723 | break; |
724 | default: |
725 | pr_err("DPM Level not supported\n"); |
726 | fPowerDPMx = Convert_ULONG_ToFraction(1); |
727 | fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000); |
728 | } |
729 | |
730 | /*------------------------- |
731 | * DECODING FUSE VALUES |
732 | * ------------------------ |
733 | */ |
734 | /*Decode RO_Fused*/ |
735 | sRO_fuse = getASICProfilingInfo->sRoFuse; |
736 | |
737 | sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex; |
738 | sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB; |
739 | sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength; |
740 | |
741 | sOutput_FuseValues.sEfuse = sInput_FuseValues; |
742 | |
743 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
744 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), |
745 | (uint32_t *)&sOutput_FuseValues); |
746 | |
747 | if (result) |
748 | return result; |
749 | |
750 | /* Finally, the actual fuse value */ |
751 | ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); |
752 | fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1); |
753 | fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1); |
754 | fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); |
755 | |
756 | sCACm_fuse = getASICProfilingInfo->sCACm; |
757 | |
758 | sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex; |
759 | sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB; |
760 | sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength; |
761 | |
762 | sOutput_FuseValues.sEfuse = sInput_FuseValues; |
763 | |
764 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
765 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), |
766 | (uint32_t *)&sOutput_FuseValues); |
767 | |
768 | if (result) |
769 | return result; |
770 | |
771 | ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); |
772 | fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000); |
773 | fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000); |
774 | |
775 | fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); |
776 | |
777 | sCACb_fuse = getASICProfilingInfo->sCACb; |
778 | |
779 | sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex; |
780 | sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB; |
781 | sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength; |
782 | sOutput_FuseValues.sEfuse = sInput_FuseValues; |
783 | |
784 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
785 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), |
786 | (uint32_t *)&sOutput_FuseValues); |
787 | |
788 | if (result) |
789 | return result; |
790 | |
791 | ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); |
792 | fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000); |
793 | fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000); |
794 | |
795 | fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); |
796 | |
797 | sKt_Beta_fuse = getASICProfilingInfo->sKt_b; |
798 | |
799 | sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex; |
800 | sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB; |
801 | sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength; |
802 | |
803 | sOutput_FuseValues.sEfuse = sInput_FuseValues; |
804 | |
805 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
806 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), |
807 | (uint32_t *)&sOutput_FuseValues); |
808 | |
809 | if (result) |
810 | return result; |
811 | |
812 | ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); |
813 | fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000); |
814 | fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000); |
815 | |
816 | fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused, |
817 | fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); |
818 | |
819 | sKv_m_fuse = getASICProfilingInfo->sKv_m; |
820 | |
821 | sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex; |
822 | sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB; |
823 | sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength; |
824 | |
825 | sOutput_FuseValues.sEfuse = sInput_FuseValues; |
826 | |
827 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
828 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), |
829 | (uint32_t *)&sOutput_FuseValues); |
830 | if (result) |
831 | return result; |
832 | |
833 | ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); |
834 | fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000); |
835 | fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000); |
836 | fRange = fMultiply(fRange, ConvertToFraction(-1)); |
837 | |
838 | fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, |
839 | fAverage, fRange, sKv_m_fuse.ucEfuseLength); |
840 | |
841 | sKv_b_fuse = getASICProfilingInfo->sKv_b; |
842 | |
843 | sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex; |
844 | sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB; |
845 | sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength; |
846 | sOutput_FuseValues.sEfuse = sInput_FuseValues; |
847 | |
848 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
849 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), |
850 | (uint32_t *)&sOutput_FuseValues); |
851 | |
852 | if (result) |
853 | return result; |
854 | |
855 | ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); |
856 | fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000); |
857 | fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000); |
858 | |
859 | fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, |
860 | fAverage, fRange, sKv_b_fuse.ucEfuseLength); |
861 | |
862 | /* Decoding the Leakage - No special struct container */ |
863 | /* |
864 | * usLkgEuseIndex=56 |
865 | * ucLkgEfuseBitLSB=6 |
866 | * ucLkgEfuseLength=10 |
867 | * ulLkgEncodeLn_MaxDivMin=69077 |
868 | * ulLkgEncodeMax=1000000 |
869 | * ulLkgEncodeMin=1000 |
870 | * ulEfuseLogisticAlpha=13 |
871 | */ |
872 | |
873 | sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex; |
874 | sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB; |
875 | sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength; |
876 | |
877 | sOutput_FuseValues.sEfuse = sInput_FuseValues; |
878 | |
879 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
880 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), |
881 | (uint32_t *)&sOutput_FuseValues); |
882 | |
883 | if (result) |
884 | return result; |
885 | |
886 | ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue); |
887 | fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000); |
888 | fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000); |
889 | |
890 | fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, |
891 | fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); |
892 | fLkg_FT = fFT_Lkg_V0NORM; |
893 | |
894 | /*------------------------------------------- |
895 | * PART 2 - Grabbing all required values |
896 | *------------------------------------------- |
897 | */ |
898 | fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000), |
899 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); |
900 | fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000), |
901 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); |
902 | fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000), |
903 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); |
904 | fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000), |
905 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); |
906 | fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000), |
907 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); |
908 | fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000), |
909 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign))); |
910 | fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000), |
911 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign))); |
912 | fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000), |
913 | ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); |
914 | |
915 | fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a)); |
916 | fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b)); |
917 | fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c)); |
918 | |
919 | fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed)); |
920 | |
921 | fMargin_FMAX_mean = GetScaledFraction( |
922 | le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000); |
923 | fMargin_Plat_mean = GetScaledFraction( |
924 | le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000); |
925 | fMargin_FMAX_sigma = GetScaledFraction( |
926 | le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000); |
927 | fMargin_Plat_sigma = GetScaledFraction( |
928 | le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000); |
929 | |
930 | fMargin_DC_sigma = GetScaledFraction( |
931 | le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100); |
932 | fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); |
933 | |
934 | fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); |
935 | fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100)); |
936 | fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100)); |
937 | fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100))); |
938 | fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10)); |
939 | |
940 | fSclk = GetScaledFraction(sclk, 100); |
941 | |
942 | fV_max = fDivide(GetScaledFraction( |
943 | le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4)); |
944 | fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10); |
945 | fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100); |
946 | fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10); |
947 | fV_FT = fDivide(GetScaledFraction( |
948 | le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4)); |
949 | fV_min = fDivide(GetScaledFraction( |
950 | le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4)); |
951 | |
952 | /*----------------------- |
953 | * PART 3 |
954 | *----------------------- |
955 | */ |
956 | |
957 | fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); |
958 | fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); |
959 | fC_Term = fAdd(fMargin_RO_c, |
960 | fAdd(fMultiply(fSM_A0, fLkg_FT), |
961 | fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)), |
962 | fAdd(fMultiply(fSM_A3, fSclk), |
963 | fSubtract(fSM_A7, fRO_fused))))); |
964 | |
965 | fVDDC_base = fSubtract(fRO_fused, |
966 | fSubtract(fMargin_RO_c, |
967 | fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk)))); |
968 | fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2)); |
969 | |
970 | repeat = fSubtract(fVDDC_base, |
971 | fDivide(fMargin_DC_sigma, ConvertToFraction(1000))); |
972 | |
973 | fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a, |
974 | fGetSquare(repeat)), |
975 | fAdd(fMultiply(fMargin_RO_b, repeat), |
976 | fMargin_RO_c)); |
977 | |
978 | fDC_SCLK = fSubtract(fRO_fused, |
979 | fSubtract(fRO_DC_margin, |
980 | fSubtract(fSM_A3, |
981 | fMultiply(fSM_A2, repeat)))); |
982 | fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1)); |
983 | |
984 | fSigma_DC = fSubtract(fSclk, fDC_SCLK); |
985 | |
986 | fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean); |
987 | fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean); |
988 | fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma); |
989 | fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma); |
990 | |
991 | fSquared_Sigma_DC = fGetSquare(fSigma_DC); |
992 | fSquared_Sigma_CR = fGetSquare(fSigma_CR); |
993 | fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX); |
994 | |
995 | fSclk_margin = fAdd(fMicro_FMAX, |
996 | fAdd(fMicro_CR, |
997 | fAdd(fMargin_fixed, |
998 | fSqrt(fAdd(fSquared_Sigma_FMAX, |
999 | fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR)))))); |
1000 | /* |
1001 | fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5; |
1002 | fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6; |
1003 | fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused; |
1004 | */ |
1005 | |
1006 | fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5); |
1007 | fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6); |
1008 | fC_Term = fAdd(fRO_DC_margin, |
1009 | fAdd(fMultiply(fSM_A0, fLkg_FT), |
1010 | fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT), |
1011 | fAdd(fSclk, fSclk_margin)), |
1012 | fAdd(fMultiply(fSM_A3, |
1013 | fAdd(fSclk, fSclk_margin)), |
1014 | fSubtract(fSM_A7, fRO_fused))))); |
1015 | |
1016 | SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots); |
1017 | |
1018 | if (GreaterThan(fRoots[0], fRoots[1])) |
1019 | fEVV_V = fRoots[1]; |
1020 | else |
1021 | fEVV_V = fRoots[0]; |
1022 | |
1023 | if (GreaterThan(fV_min, fEVV_V)) |
1024 | fEVV_V = fV_min; |
1025 | else if (GreaterThan(fEVV_V, fV_max)) |
1026 | fEVV_V = fSubtract(fV_max, fStepSize); |
1027 | |
1028 | fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0); |
1029 | |
1030 | /*----------------- |
1031 | * PART 4 |
1032 | *----------------- |
1033 | */ |
1034 | |
1035 | fV_x = fV_min; |
1036 | |
1037 | while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) { |
1038 | fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd( |
1039 | fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk), |
1040 | fGetSquare(fV_x)), fDerateTDP); |
1041 | |
1042 | fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor, |
1043 | fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused, |
1044 | fT_prod), fKv_b_fused), fV_x)), fV_x))); |
1045 | fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply( |
1046 | fKt_Beta_fused, fT_prod))); |
1047 | fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( |
1048 | fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT))); |
1049 | fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply( |
1050 | fKt_Beta_fused, fT_FT))); |
1051 | |
1052 | fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right); |
1053 | |
1054 | fTDP_Current = fDivide(fTDP_Power, fV_x); |
1055 | |
1056 | fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine), |
1057 | ConvertToFraction(10))); |
1058 | |
1059 | fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0); |
1060 | |
1061 | if (GreaterThan(fV_max, fV_NL) && |
1062 | (GreaterThan(fV_NL, fEVV_V) || |
1063 | Equal(fV_NL, fEVV_V))) { |
1064 | fV_NL = fMultiply(fV_NL, ConvertToFraction(1000)); |
1065 | |
1066 | *voltage = (uint16_t)fV_NL.partial.real; |
1067 | break; |
1068 | } else |
1069 | fV_x = fAdd(fV_x, fStepSize); |
1070 | } |
1071 | |
1072 | return result; |
1073 | } |
1074 | |
1075 | /** atomctrl_get_voltage_evv_on_sclk gets voltage via call to ATOM COMMAND table. |
1076 | * @param hwmgr input: pointer to hwManager |
1077 | * @param voltage_type input: type of EVV voltage VDDC or VDDGFX |
1078 | * @param sclk input: in 10KHz units. DPM state SCLK frequency, |
1079 | * which is defined in the PPTable SCLK/VDDC dependence |
1080 | * table associated with this virtual_voltage_Id |
1081 | * @param virtual_voltage_Id input: voltage id which matches the per-voltage DPM state: 0xff01, 0xff02.. 0xff08 |
1082 | * @param voltage output: real voltage level in units of mV |
1083 | */ |
1084 | int atomctrl_get_voltage_evv_on_sclk( |
1085 | struct pp_hwmgr *hwmgr, |
1086 | uint8_t voltage_type, |
1087 | uint32_t sclk, uint16_t virtual_voltage_Id, |
1088 | uint16_t *voltage) |
1089 | { |
1090 | struct amdgpu_device *adev = hwmgr->adev; |
1091 | GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; |
1092 | int result; |
1093 | |
1094 | get_voltage_info_param_space.ucVoltageType = |
1095 | voltage_type; |
1096 | get_voltage_info_param_space.ucVoltageMode = |
1097 | ATOM_GET_VOLTAGE_EVV_VOLTAGE; |
1098 | get_voltage_info_param_space.usVoltageLevel = |
1099 | cpu_to_le16(virtual_voltage_Id); |
1100 | get_voltage_info_param_space.ulSCLKFreq = |
1101 | cpu_to_le32(sclk); |
1102 | |
1103 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
1104 | GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), |
1105 | (uint32_t *)&get_voltage_info_param_space); |
1106 | |
1107 | *voltage = result ? 0 : |
1108 | le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) |
1109 | (&get_voltage_info_param_space))->usVoltageLevel); |
1110 | |
1111 | return result; |
1112 | } |
1113 | |
1114 | /** |
1115 | * atomctrl_get_voltage_evv gets voltage via call to ATOM COMMAND table. |
1116 | * @param hwmgr input: pointer to hwManager |
1117 | * @param virtual_voltage_id input: voltage id which matches the per-voltage DPM state: 0xff01, 0xff02.. 0xff08 |
1118 | * @param voltage output: real voltage level in units of mV |
1119 | */ |
1120 | int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, |
1121 | uint16_t virtual_voltage_id, |
1122 | uint16_t *voltage) |
1123 | { |
1124 | struct amdgpu_device *adev = hwmgr->adev; |
1125 | GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; |
1126 | int result; |
1127 | int entry_id; |
1128 | |
1129 | /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */ |
1130 | for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) { |
1131 | if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) { |
1132 | /* found */ |
1133 | break; |
1134 | } |
1135 | } |
1136 | |
1137 | if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) { |
1138 | pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n"); |
1139 | return -EINVAL; |
1140 | } |
1141 | |
1142 | get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC; |
1143 | get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; |
1144 | get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id; |
1145 | get_voltage_info_param_space.ulSCLKFreq = |
1146 | cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk); |
1147 | |
1148 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
1149 | GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), |
1150 | (uint32_t *)&get_voltage_info_param_space); |
1151 | |
1152 | if (0 != result) |
1153 | return result; |
1154 | |
1155 | *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) |
1156 | (&get_voltage_info_param_space))->usVoltageLevel); |
1157 | |
1158 | return result; |
1159 | } |
1160 | |
1161 | /** |
1162 | * Get the mpll reference clock in 10KHz |
1163 | */ |
1164 | uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr) |
1165 | { |
1166 | ATOM_COMMON_TABLE_HEADER *fw_info; |
1167 | uint32_t clock; |
1168 | u8 frev, crev; |
1169 | u16 size; |
1170 | |
1171 | fw_info = (ATOM_COMMON_TABLE_HEADER *) |
1172 | smu_atom_get_data_table(hwmgr->adev, |
1173 | GetIndexIntoMasterTable(DATA, FirmwareInfo), |
1174 | &size, &frev, &crev); |
1175 | |
1176 | if (fw_info == NULL) |
1177 | clock = 2700; |
1178 | else { |
1179 | if ((fw_info->ucTableFormatRevision == 2) && |
1180 | (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V2_1))) { |
1181 | ATOM_FIRMWARE_INFO_V2_1 *fwInfo_2_1 = |
1182 | (ATOM_FIRMWARE_INFO_V2_1 *)fw_info; |
1183 | clock = (uint32_t)(le16_to_cpu(fwInfo_2_1->usMemoryReferenceClock)); |
1184 | } else { |
1185 | ATOM_FIRMWARE_INFO *fwInfo_0_0 = |
1186 | (ATOM_FIRMWARE_INFO *)fw_info; |
1187 | clock = (uint32_t)(le16_to_cpu(fwInfo_0_0->usReferenceClock)); |
1188 | } |
1189 | } |
1190 | |
1191 | return clock; |
1192 | } |
1193 | |
1194 | /** |
1195 | * Get the asic internal spread spectrum table |
1196 | */ |
1197 | static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device) |
1198 | { |
1199 | ATOM_ASIC_INTERNAL_SS_INFO *table = NULL; |
1200 | u8 frev, crev; |
1201 | u16 size; |
1202 | |
1203 | table = (ATOM_ASIC_INTERNAL_SS_INFO *) |
1204 | smu_atom_get_data_table(device, |
1205 | GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info), |
1206 | &size, &frev, &crev); |
1207 | |
1208 | return table; |
1209 | } |
1210 | |
1211 | /** |
1212 | * Get the asic internal spread spectrum assignment |
1213 | */ |
1214 | static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr, |
1215 | const uint8_t clockSource, |
1216 | const uint32_t clockSpeed, |
1217 | pp_atomctrl_internal_ss_info *ssEntry) |
1218 | { |
1219 | ATOM_ASIC_INTERNAL_SS_INFO *table; |
1220 | ATOM_ASIC_SS_ASSIGNMENT *ssInfo; |
1221 | int entry_found = 0; |
1222 | |
1223 | memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info)); |
1224 | |
1225 | table = asic_internal_ss_get_ss_table(hwmgr->adev); |
1226 | |
1227 | if (NULL == table) |
1228 | return -1; |
1229 | |
1230 | ssInfo = &table->asSpreadSpectrum[0]; |
1231 | |
1232 | while (((uint8_t *)ssInfo - (uint8_t *)table) < |
1233 | le16_to_cpu(table->sHeader.usStructureSize)) { |
1234 | if ((clockSource == ssInfo->ucClockIndication) && |
1235 | ((uint32_t)clockSpeed <= le32_to_cpu(ssInfo->ulTargetClockRange))) { |
1236 | entry_found = 1; |
1237 | break; |
1238 | } |
1239 | |
1240 | ssInfo = (ATOM_ASIC_SS_ASSIGNMENT *)((uint8_t *)ssInfo + |
1241 | sizeof(ATOM_ASIC_SS_ASSIGNMENT)); |
1242 | } |
1243 | |
1244 | if (entry_found) { |
1245 | ssEntry->speed_spectrum_percentage = |
1246 | le16_to_cpu(ssInfo->usSpreadSpectrumPercentage); |
1247 | ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz); |
1248 | |
1249 | if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) && |
1250 | (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) || |
1251 | (GET_DATA_TABLE_MAJOR_REVISION(table) == 3)) { |
1252 | ssEntry->speed_spectrum_rate /= 100; |
1253 | } |
1254 | |
1255 | switch (ssInfo->ucSpreadSpectrumMode) { |
1256 | case 0: |
1257 | ssEntry->speed_spectrum_mode = |
1258 | pp_atomctrl_spread_spectrum_mode_down; |
1259 | break; |
1260 | case 1: |
1261 | ssEntry->speed_spectrum_mode = |
1262 | pp_atomctrl_spread_spectrum_mode_center; |
1263 | break; |
1264 | default: |
1265 | ssEntry->speed_spectrum_mode = |
1266 | pp_atomctrl_spread_spectrum_mode_down; |
1267 | break; |
1268 | } |
1269 | } |
1270 | |
1271 | return entry_found ? 0 : 1; |
1272 | } |
1273 | |
1274 | /** |
1275 | * Get the memory clock spread spectrum info |
1276 | */ |
1277 | int atomctrl_get_memory_clock_spread_spectrum( |
1278 | struct pp_hwmgr *hwmgr, |
1279 | const uint32_t memory_clock, |
1280 | pp_atomctrl_internal_ss_info *ssInfo) |
1281 | { |
1282 | return asic_internal_ss_get_ss_asignment(hwmgr, |
1283 | ASIC_INTERNAL_MEMORY_SS, memory_clock, ssInfo); |
1284 | } |
1285 | /** |
1286 | * Get the engine clock spread spectrum info |
1287 | */ |
1288 | int atomctrl_get_engine_clock_spread_spectrum( |
1289 | struct pp_hwmgr *hwmgr, |
1290 | const uint32_t engine_clock, |
1291 | pp_atomctrl_internal_ss_info *ssInfo) |
1292 | { |
1293 | return asic_internal_ss_get_ss_asignment(hwmgr, |
1294 | ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo); |
1295 | } |
1296 | |
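     | /** |
     | * atomctrl_read_efuse reads a bit field from the efuse block via the |
     | * ReadEfuseValue ATOM COMMAND table. |
     | * @param hwmgr input: pointer to hwManager |
     | * @param start_index input: first efuse bit to read |
     | * @param end_index input: last efuse bit to read (inclusive) |
     | * @param mask input: mask applied to the returned dword |
     | * @param efuse output: masked efuse value, 0 on failure |
     | */ |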
1297 | int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, |
1298 | uint16_t end_index, uint32_t mask, uint32_t *efuse) |
1299 | { |
1300 | struct amdgpu_device *adev = hwmgr->adev; |
1301 | int result; |
1302 | READ_EFUSE_VALUE_PARAMETER efuse_param; |
1303 | |
1304 | efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4); |
1305 | efuse_param.sEfuse.ucBitShift = (uint8_t) |
1306 | (start_index - ((start_index / 32) * 32)); |
1307 | efuse_param.sEfuse.ucBitLength = (uint8_t) |
1308 | ((end_index - start_index) + 1); |
1309 | |
1310 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
1311 | GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), |
1312 | (uint32_t *)&efuse_param); |
1313 | *efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask; |
1314 | |
1315 | return result; |
1316 | } |
1317 | |
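     | /** |
     | * atomctrl_set_ac_timing_ai programs the memory AC timings for the given |
     | * memory clock and MCLK DPM level via the DynamicMemorySettings ATOM |
     | * COMMAND table. |
     | */ |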
1318 | int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, |
1319 | uint8_t level) |
1320 | { |
1321 | struct amdgpu_device *adev = hwmgr->adev; |
1322 | DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters; |
1323 | int result; |
1324 | |
1325 | memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = |
1326 | memory_clock & SET_CLOCK_FREQ_MASK; |
1327 | memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = |
1328 | ADJUST_MC_SETTING_PARAM; |
1329 | memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level; |
1330 | |
1331 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
1332 | GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), |
1333 | (uint32_t *)&memory_clock_parameters); |
1334 | |
1335 | return result; |
1336 | } |
1337 | |
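     | /** |
     | * atomctrl_get_voltage_evv_on_sclk_ai is the V1_3 variant of |
     | * atomctrl_get_voltage_evv_on_sclk: same inputs, but the EVV voltage is |
     | * returned as a 32-bit value from GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3. |
     | */ |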
1338 | int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, |
1339 | uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage) |
1340 | { |
1341 | struct amdgpu_device *adev = hwmgr->adev; |
1342 | int result; |
1343 | GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space; |
1344 | |
1345 | get_voltage_info_param_space.ucVoltageType = voltage_type; |
1346 | get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; |
1347 | get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id); |
1348 | get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk); |
1349 | |
1350 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
1351 | GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), |
1352 | (uint32_t *)&get_voltage_info_param_space); |
1353 | |
1354 | *voltage = result ? 0 : |
1355 | le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); |
1356 | |
1357 | return result; |
1358 | } |
1359 | |
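     | /** |
     | * atomctrl_get_smc_sclk_range_table copies the SCLK FCW range entries from |
     | * the SMU_Info data table into the caller-supplied sclk range table. |
     | */ |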
1360 | int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table) |
1361 | { |
1362 | |
1363 | int i; |
1364 | u8 frev, crev; |
1365 | u16 size; |
1366 | |
1367 | ATOM_SMU_INFO_V2_1 *psmu_info = |
1368 | (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev, |
1369 | GetIndexIntoMasterTable(DATA, SMU_Info), |
1370 | &size, &frev, &crev); |
1371 |  |
     | /* guard against a missing SMU_Info data table before dereferencing it */ |
     | if (!psmu_info) |
     | return -EINVAL; |
1372 |  |
1373 | for (i = 0; i < psmu_info->ucSclkEntryNum; i++) { |
1374 | table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting; |
1375 | table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv; |
1376 | table->entry[i].usFcw_pcc = |
1377 | le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc); |
1378 | table->entry[i].usFcw_trans_upper = |
1379 | le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper); |
1380 | table->entry[i].usRcw_trans_lower = |
1381 | le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower); |
1382 | } |
1383 | |
1384 | return 0; |
1385 | } |
1386 | |
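     | /** |
     | * atomctrl_get_avfs_information fills the AVFS parameter block from the |
     | * ASIC_ProfilingInfo V3_6 data table, byte-swapping the multi-byte fields. |
     | */ |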
1387 | int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, |
1388 | struct pp_atom_ctrl__avfs_parameters *param) |
1389 | { |
1390 | ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL; |
1391 | |
1392 | if (param == NULL) |
1393 | return -EINVAL; |
1394 | |
1395 | profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *) |
1396 | smu_atom_get_data_table(hwmgr->adev, |
1397 | GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), |
1398 | NULL, NULL, NULL); |
1399 | if (!profile) |
1400 | return -1; |
1401 | |
1402 | param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0); |
1403 | param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1); |
1404 | param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2); |
1405 | param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma); |
1406 | param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean); |
1407 | param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma); |
1408 | param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0); |
1409 | param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1); |
1410 | param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2); |
1411 | param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0); |
1412 | param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1); |
1413 | param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2); |
1414 | param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1); |
1415 | param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2); |
1416 | param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b); |
1417 | param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1); |
1418 | param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2); |
1419 | param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b); |
1420 | param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv); |
1421 | param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF; |
1422 | param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON; |
1423 | param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF; |
1424 | param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON; |
1425 | param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor); |
1426 | param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage; |
1427 | |
1428 | return 0; |
1429 | } |
1430 | |
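     | /** |
     | * atomctrl_get_svi2_info looks up the SVID2 voltage object for the given |
     | * voltage type and reports its SVD/SVC GPIO ids and load line setting. |
     | */ |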
1431 | int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, |
1432 | uint8_t *svd_gpio_id, uint8_t *svc_gpio_id, |
1433 | uint16_t *load_line) |
1434 | { |
1435 | ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = |
1436 | (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); |
1437 | |
1438 | const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; |
1439 | |
1440 | PP_ASSERT_WITH_CODE((NULL != voltage_info), |
1441 | "Could not find Voltage Table in BIOS.", return -EINVAL); |
1442 | |
1443 | voltage_object = atomctrl_lookup_voltage_type_v3 |
1444 | (voltage_info, voltage_type, VOLTAGE_OBJ_SVID2); |
1445 | |
1446 | *svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId; |
1447 | *svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId; |
1448 | *load_line = voltage_object->asSVID2Obj.usLoadLine_PSI; |
1449 | |
1450 | return 0; |
1451 | } |
1452 | |
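     | /** |
     | * atomctrl_get_leakage_id_from_efuse queries the leakage (EVV) voltage id |
     | * fused into the part by issuing SetVoltage with ATOM_GET_LEAKAGE_ID. |
     | */ |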
1453 | int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id) |
1454 | { |
1455 | struct amdgpu_device *adev = hwmgr->adev; |
1456 | SET_VOLTAGE_PS_ALLOCATION allocation; |
1457 | SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters = |
1458 | (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage; |
1459 | int result; |
1460 | |
1461 | voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID; |
1462 | |
1463 | result = amdgpu_atom_execute_table(adev->mode_info.atom_context, |
1464 | GetIndexIntoMasterTable(COMMAND, SetVoltage), |
1465 | (uint32_t *)voltage_parameters); |
1466 | |
1467 | *virtual_voltage_id = voltage_parameters->usVoltageLevel; |
1468 | |
1469 | return result; |
1470 | } |
1471 | |
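     | /** |
     | * atomctrl_get_leakage_vddc_base_on_leakage translates a virtual (leakage) |
     | * voltage id into real VDDC/VDDCI levels by matching the efuse voltage id |
     | * against the leakage bins of the ASIC_ProfilingInfo V2_1 table; both |
     | * outputs stay 0 when no matching entry is found. |
     | */ |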
1472 | int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, |
1473 | uint16_t *vddc, uint16_t *vddci, |
1474 | uint16_t virtual_voltage_id, |
1475 | uint16_t efuse_voltage_id) |
1476 | { |
1477 | int i, j; |
1478 | int ix; |
1479 | u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf; |
1480 | ATOM_ASIC_PROFILING_INFO_V2_1 *profile; |
1481 | |
1482 | *vddc = 0; |
1483 | *vddci = 0; |
1484 | |
1485 | ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); |
1486 | |
1487 | profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) |
1488 | smu_atom_get_data_table(hwmgr->adev, |
1489 | ix, |
1490 | NULL, NULL, NULL); |
1491 | if (!profile) |
1492 | return -EINVAL; |
1493 | |
1494 | if ((profile->asHeader.ucTableFormatRevision >= 2) && |
1495 | (profile->asHeader.ucTableContentRevision >= 1) && |
1496 | (profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) { |
1497 | leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset); |
1498 | vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset); |
1499 | vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset); |
1500 | if (profile->ucElbVDDC_Num > 0) { |
1501 | for (i = 0; i < profile->ucElbVDDC_Num; i++) { |
1502 | if (vddc_id_buf[i] == virtual_voltage_id) { |
1503 | for (j = 0; j < profile->ucLeakageBinNum; j++) { |
1504 | if (efuse_voltage_id <= leakage_bin[j]) { |
1505 | *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i]; |
1506 | break; |
1507 | } |
1508 | } |
1509 | break; |
1510 | } |
1511 | } |
1512 | } |
1513 | |
1514 | vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset); |
1515 | vddci_buf = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset); |
1516 | if (profile->ucElbVDDCI_Num > 0) { |
1517 | for (i = 0; i < profile->ucElbVDDCI_Num; i++) { |
1518 | if (vddci_id_buf[i] == virtual_voltage_id) { |
1519 | for (j = 0; j < profile->ucLeakageBinNum; j++) { |
1520 | if (efuse_voltage_id <= leakage_bin[j]) { |
1521 | *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i]; |
1522 | break; |
1523 | } |
1524 | } |
1525 | break; |
1526 | } |
1527 | } |
1528 | } |
1529 | } |
1530 | |
1531 | return 0; |
1532 | } |
1533 | |
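     | /** |
     | * atomctrl_get_voltage_range reports the chip's min/max VDDC from the |
     | * ASIC_ProfilingInfo data table (scaled per chip family), or 0/0 when the |
     | * table is missing or the chip is not handled. |
     | */ |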
1534 | void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc, |
1535 | uint32_t *min_vddc) |
1536 | { |
1537 | void *profile; |
1538 | |
1539 | profile = smu_atom_get_data_table(hwmgr->adev, |
1540 | GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), |
1541 | NULL, NULL, NULL); |
1542 | |
1543 | if (profile) { |
1544 | switch (hwmgr->chip_id) { |
1545 | case CHIP_TONGA: |
1546 | case CHIP_FIJI: |
1547 | *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4; |
1548 | *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4; |
1549 | return; |
1550 | case CHIP_POLARIS11: |
1551 | case CHIP_POLARIS10: |
1552 | case CHIP_POLARIS12: |
1553 | *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100; |
1554 | *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100; |
1555 | return; |
1556 | default: |
1557 | break; |
1558 | } |
1559 | } |
1560 | *max_vddc = 0; |
1561 | *min_vddc = 0; |
1562 | } |