File: dev/pci/drm/amd/pm/swsmu/amdgpu_smu.c
Warning: line 1429, column 27: Assigned value is garbage or undefined
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
        size_t size = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        size = smu_get_pp_feature_mask(smu, buf);

        mutex_unlock(&smu->mutex);

        return size;
}
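
/*
 * Editorial note: the smu_get_pp_feature_mask()-style helpers used
 * throughout this file are dispatch macros from smu_internal.h, not
 * direct calls.  Based on their expansions in the analyzer output,
 * each evaluates roughly as the sketch below (illustrative, not
 * verbatim):
 *
 *   #define smu_get_pp_feature_mask(smu, buf)                         \
 *           ((smu)->ppt_funcs ?                                       \
 *            ((smu)->ppt_funcs->get_pp_feature_mask ?                 \
 *             (smu)->ppt_funcs->get_pp_feature_mask((smu), (buf)) :   \
 *             0) :                                                    \
 *            -EINVAL)
 *
 * i.e. a missing ppt_funcs table yields -EINVAL, a missing hook is a
 * silent no-op returning 0, and otherwise the ASIC-specific callback
 * runs.
 */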

int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        mutex_lock(&smu->mutex);

        ret = smu_set_pp_feature_mask(smu, new_mask);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
        int ret = 0;
        struct smu_context *smu = &adev->smu;

        if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
                *value = smu_get_gfx_off_status(smu);
        else
                ret = -EINVAL;

        return ret;
}

int smu_set_soft_freq_range(struct smu_context *smu,
                            enum smu_clk_type clk_type,
                            uint32_t min,
                            uint32_t max)
{
        int ret = 0;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->set_soft_freq_limited_range)
                ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
                                                                  clk_type,
                                                                  min,
                                                                  max);

        mutex_unlock(&smu->mutex);

        return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
                           enum smu_clk_type clk_type,
                           uint32_t *min,
                           uint32_t *max)
{
        int ret = 0;

        if (!min && !max)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs->get_dpm_ultimate_freq)
                ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
                                                            clk_type,
                                                            min,
                                                            max);

        mutex_unlock(&smu->mutex);

        return ret;
}

static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
                                         bool enable)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        if (!smu->ppt_funcs->dpm_set_vcn_enable)
                return 0;

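        /*
         * Editorial note, as read from the code: vcn_gated is 1 when VCN
         * is power gated and enable requests it ungated, so
         * "gated XOR enable" is nonzero exactly when the block is already
         * in the requested state and the SMU message can be skipped.
         */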
        if (atomic_read(&power_gate->vcn_gated) ^ enable)
                return 0;

        ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
        if (!ret)
                atomic_set(&power_gate->vcn_gated, !enable);

        return ret;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
                                  bool enable)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        mutex_lock(&power_gate->vcn_gate_lock);

        ret = smu_dpm_set_vcn_enable_locked(smu, enable);

        mutex_unlock(&power_gate->vcn_gate_lock);

        return ret;
}

static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
                                          bool enable)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        if (!smu->ppt_funcs->dpm_set_jpeg_enable)
                return 0;

        if (atomic_read(&power_gate->jpeg_gated) ^ enable)
                return 0;

        ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
        if (!ret)
                atomic_set(&power_gate->jpeg_gated, !enable);

        return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
                                   bool enable)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        mutex_lock(&power_gate->jpeg_gate_lock);

        ret = smu_dpm_set_jpeg_enable_locked(smu, enable);

        mutex_unlock(&power_gate->jpeg_gate_lock);

        return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API takes no smu->mutex lock protection because:
 * 1. It is called by other IP blocks (gfx/sdma/vcn/uvd/vce), and the
 *    caller guarantees the calls are race-condition free.
 * 2. Or it is called on a user setting request of
 *    power_dpm_force_performance_level, in which case smu->mutex is
 *    already held by the parent API smu_force_performance_level on
 *    the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
{
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        switch (block_type) {
        /*
         * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
         * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
         */
        case AMD_IP_BLOCK_TYPE_UVD:
        case AMD_IP_BLOCK_TYPE_VCN:
                ret = smu_dpm_set_vcn_enable(smu, !gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
                                gate ? "gate" : "ungate");
                break;
        case AMD_IP_BLOCK_TYPE_GFX:
                ret = smu_gfx_off_control(smu, gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
                                gate ? "enable" : "disable");
                break;
        case AMD_IP_BLOCK_TYPE_SDMA:
                ret = smu_powergate_sdma(smu, gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
                                gate ? "gate" : "ungate");
                break;
        case AMD_IP_BLOCK_TYPE_JPEG:
                ret = smu_dpm_set_jpeg_enable(smu, !gate);
                if (ret)
                        dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
                                gate ? "gate" : "ungate");
                break;
        default:
                dev_err(smu->adev->dev, "Unsupported block type!\n");
                return -EINVAL;
        }

        return ret;
}
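
/*
 * Usage sketch (editorial, illustrative only, consistent with the
 * docstring above): an IP block such as VCN would call
 * smu_dpm_set_power_gate(smu, AMD_IP_BLOCK_TYPE_VCN, true) from its
 * idle path to power itself down, and call it again with gate == false
 * before submitting new work.
 */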

int smu_get_power_num_states(struct smu_context *smu,
                             struct pp_states_info *state_info)
{
        if (!state_info)
                return -EINVAL;

        /* not support power state */
        memset(state_info, 0, sizeof(struct pp_states_info));
        state_info->nums = 1;
        state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

        return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
        if (adev->asic_type >= CHIP_ARCTURUS)
                return true;

        return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        uint32_t powerplay_table_size;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;

        mutex_lock(&smu->mutex);

        if (smu_table->hardcode_pptable)
                *table = smu_table->hardcode_pptable;
        else
                *table = smu_table->power_play_table;

        powerplay_table_size = smu_table->power_play_table_size;

        mutex_unlock(&smu->mutex);

        return powerplay_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;

        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;

        if (header->usStructureSize != size) {
                dev_err(smu->adev->dev, "pp table size not matched !\n");
                return -EIO;
        }

        mutex_lock(&smu->mutex);
        if (!smu_table->hardcode_pptable)
                smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
        if (!smu_table->hardcode_pptable) {
                ret = -ENOMEM;
                goto failed;
        }

        memcpy(smu_table->hardcode_pptable, buf, size);
        smu_table->power_play_table = smu_table->hardcode_pptable;
        smu_table->power_play_table_size = size;

        /*
         * Special hw_fini action (for Navi1x, the DPMs disablement will be
         * skipped) may be needed for custom pptable uploading.
         */
        smu->uploading_custom_pp_table = true;

        ret = smu_reset(smu);
        if (ret)
                dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

        smu->uploading_custom_pp_table = false;

failed:
        mutex_unlock(&smu->mutex);
        return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
        uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

        bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

        ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                           SMU_FEATURE_MAX/32);
        if (ret)
                return ret;

        bitmap_or(feature->allowed, feature->allowed,
                  (unsigned long *)allowed_feature_mask,
                  feature->feature_num);

        return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                smu->od_enabled = true;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                navi10_set_ppt_funcs(smu);
                break;
        case CHIP_ARCTURUS:
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                arcturus_set_ppt_funcs(smu);
                /* OD is not supported on Arcturus */
                smu->od_enabled = false;
                break;
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
                sienna_cichlid_set_ppt_funcs(smu);
                break;
        case CHIP_RENOIR:
                renoir_set_ppt_funcs(smu);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int smu_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;

        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;
        smu->is_apu = false;
        rw_init(&smu->mutex, "smurw");
        rw_init(&smu->smu_baco.mutex, "smubc");
        smu->smu_baco.state = SMU_BACO_STATE_EXIT;
        smu->smu_baco.platform_support = false;

        return smu_set_funcs(adev);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int vcn_gate, jpeg_gate;
        int ret = 0;

        if (!smu->ppt_funcs->set_default_dpm_table)
                return 0;

        mutex_lock(&power_gate->vcn_gate_lock);
        mutex_lock(&power_gate->jpeg_gate_lock);

        vcn_gate = atomic_read(&power_gate->vcn_gated);
        jpeg_gate = atomic_read(&power_gate->jpeg_gated);

        ret = smu_dpm_set_vcn_enable_locked(smu, true);
        if (ret)
                goto err0_out;

        ret = smu_dpm_set_jpeg_enable_locked(smu, true);
        if (ret)
                goto err1_out;

        ret = smu->ppt_funcs->set_default_dpm_table(smu);
        if (ret)
                dev_err(smu->adev->dev,
                        "Failed to setup default dpm clock tables!\n");

        smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
err1_out:
        smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
err0_out:
        mutex_unlock(&power_gate->jpeg_gate_lock);
        mutex_unlock(&power_gate->vcn_gate_lock);

        return ret;
}

static int smu_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret = 0;

        if (!smu->pm_enabled)
                return 0;

        ret = smu_post_init(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to post smu init!\n");
                return ret;
        }

        ret = smu_set_default_od_settings(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup default OD settings!\n");
                return ret;
        }

        ret = smu_populate_umd_state_clk(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
                return ret;
        }

        ret = smu_get_asic_power_limits(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to get asic power limits!\n");
                return ret;
        }

        smu_get_unique_id(smu);

        smu_get_fan_parameters(smu);

        smu_handle_task(&adev->smu,
                        smu->smu_dpm.dpm_level,
                        AMD_PP_TASK_COMPLETE_INIT,
                        false);

        return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);
        uint32_t max_table_size = 0;
        int ret, i;

        /* VRAM allocation for tool table */
        if (tables[SMU_TABLE_PMSTATUSLOG].size) {
                ret = amdgpu_bo_create_kernel(adev,
                                tables[SMU_TABLE_PMSTATUSLOG].size,
                                tables[SMU_TABLE_PMSTATUSLOG].align,
                                tables[SMU_TABLE_PMSTATUSLOG].domain,
                                &tables[SMU_TABLE_PMSTATUSLOG].bo,
                                &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
                if (ret) {
                        dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
                        return ret;
                }
        }

        /* VRAM allocation for driver table */
        for (i = 0; i < SMU_TABLE_COUNT; i++) {
                if (tables[i].size == 0)
                        continue;

                if (i == SMU_TABLE_PMSTATUSLOG)
                        continue;

                if (max_table_size < tables[i].size)
                        max_table_size = tables[i].size;
        }

        driver_table->size = max_table_size;
        driver_table->align = PAGE_SIZE;
        driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

        ret = amdgpu_bo_create_kernel(adev,
                                      driver_table->size,
                                      driver_table->align,
                                      driver_table->domain,
                                      &driver_table->bo,
                                      &driver_table->mc_address,
                                      &driver_table->cpu_addr);
        if (ret) {
                dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
                if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                        amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                                &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                                &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
        }

        return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *tables = smu_table->tables;
        struct smu_table *driver_table = &(smu_table->driver_table);

        if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
                amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
                        &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
                        &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

        amdgpu_bo_free_kernel(&driver_table->bo,
                              &driver_table->mc_address,
                              &driver_table->cpu_addr);

        return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is reserved for SMC use; the SetSystemVirtualDramAddr
 * and DramLogSetDramAddr messages notify the SMC when its location changes.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;
        uint64_t pool_size = smu->pool_size;
        int ret = 0;

        if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
                return ret;

        memory_pool->size = pool_size;
        memory_pool->align = PAGE_SIZE;
        memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

        switch (pool_size) {
        case SMU_MEMORY_POOL_SIZE_256_MB:
        case SMU_MEMORY_POOL_SIZE_512_MB:
        case SMU_MEMORY_POOL_SIZE_1_GB:
        case SMU_MEMORY_POOL_SIZE_2_GB:
                ret = amdgpu_bo_create_kernel(adev,
                                              memory_pool->size,
                                              memory_pool->align,
                                              memory_pool->domain,
                                              &memory_pool->bo,
                                              &memory_pool->mc_address,
                                              &memory_pool->cpu_addr);
                if (ret)
                        dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
                break;
        default:
                break;
        }

        return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *memory_pool = &smu_table->memory_pool;

        if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
                return 0;

        amdgpu_bo_free_kernel(&memory_pool->bo,
                              &memory_pool->mc_address,
                              &memory_pool->cpu_addr);

        memset(memory_pool, 0, sizeof(struct smu_table));

        return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *dummy_read_1_table =
                        &smu_table->dummy_read_1_table;
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;

        dummy_read_1_table->size = 0x40000;
        dummy_read_1_table->align = PAGE_SIZE;
        dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

        ret = amdgpu_bo_create_kernel(adev,
                                      dummy_read_1_table->size,
                                      dummy_read_1_table->align,
                                      dummy_read_1_table->domain,
                                      &dummy_read_1_table->bo,
                                      &dummy_read_1_table->mc_address,
                                      &dummy_read_1_table->cpu_addr);
        if (ret)
                dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

        return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *dummy_read_1_table =
                        &smu_table->dummy_read_1_table;

        amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
                              &dummy_read_1_table->mc_address,
                              &dummy_read_1_table->cpu_addr);

        memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
        int ret;

        /**
         * Create the smu_table structure, and init smc tables such as
         * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
         */
        ret = smu_init_smc_tables(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to init smc tables!\n");
                return ret;
        }

        /**
         * Create the smu_power_context structure, and allocate the
         * smu_dpm_context and context size needed to fill the
         * smu_power_context data.
         */
        ret = smu_init_power(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
                return ret;
        }

        /*
         * allocate vram bos to store smc table contents.
         */
        ret = smu_init_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_alloc_memory_pool(smu);
        if (ret)
                return ret;

        ret = smu_alloc_dummy_read_table(smu);
        if (ret)
                return ret;

        ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
        if (ret)
                return ret;

        return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
        int ret;

        smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);

        smu_free_dummy_read_table(smu);

        ret = smu_free_memory_pool(smu);
        if (ret)
                return ret;

        ret = smu_fini_fb_allocations(smu);
        if (ret)
                return ret;

        ret = smu_fini_power(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
                return ret;
        }

        ret = smu_fini_smc_tables(smu);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
                return ret;
        }

        return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
        struct smu_context *smu = container_of(work, struct smu_context,
                                               throttling_logging_work);

        smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
        struct smu_context *smu = container_of(work, struct smu_context,
                                               interrupt_work);

        mutex_lock(&smu->mutex);

        if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
                smu->ppt_funcs->interrupt_work(smu);

        mutex_unlock(&smu->mutex);
}

static int smu_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
        rw_init(&smu->smu_feature.mutex, "smuft");
        bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

        rw_init(&smu->sensor_lock, "smusen");
        rw_init(&smu->metrics_lock, "smumt");
        rw_init(&smu->message_lock, "smuml");

        INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
        INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
        atomic64_set(&smu->throttle_int_counter, 0);
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

        atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
        atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
        rw_init(&smu->smu_power.power_gate.vcn_gate_lock, "vcngl");
        rw_init(&smu->smu_power.power_gate.jpeg_gate_lock, "jpgglk");

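        /*
         * Editorial note: workload_prority maps each PP_SMC_POWER_PROFILE_*
         * to a priority level, workload_setting is the reverse map from
         * priority back to profile, and workload_mask keeps one bit per
         * active priority; the default profile's bit is set first below.
         */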
        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
        smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
        smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
        smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
        smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
        smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

        smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
        smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
        smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
        smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
        smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
        smu->display_config = &adev->pm.pm_display_cfg;

        smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

        if (!amdgpu_sriov_vf(adev)) {
                ret = smu_init_microcode(smu);
                if (ret) {
                        dev_err(adev->dev, "Failed to load smu firmware!\n");
                        return ret;
                }
        }

        ret = smu_smc_table_sw_init(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to sw init smc table!\n");
                return ret;
        }

        ret = smu_register_irq_handler(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to register smc irq handler!\n");
                return ret;
        }

        return 0;
}

static int smu_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
        int ret;

        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to sw fini smc table!\n");
                return ret;
        }

        smu_fini_microcode(smu);

        return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        struct smu_temperature_range *range =
                                &smu->thermal_range;
        int ret = 0;

        if (!smu->ppt_funcs->get_thermal_temperature_range)
                return 0;

        ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
        if (ret)
                return ret;

        adev->pm.dpm.thermal.min_temp = range->min;
        adev->pm.dpm.thermal.max_temp = range->max;
        adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
        adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
        adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
        adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
        adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
        adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
        adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

        return ret;
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        uint32_t pcie_gen = 0, pcie_width = 0;
        int ret;

        if (adev->in_suspend && smu_is_dpm_running(smu)) {
                dev_info(adev->dev, "dpm has been enabled\n");
                return 0;
        }

        ret = smu_init_display_count(smu, 0);
        if (ret) {
                dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
                return ret;
        }

        ret = smu_set_driver_table_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
                return ret;
        }

        /*
         * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr
         * MSG for tools.
         */
        ret = smu_set_tool_table_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
                return ret;
        }

        /*
         * Use the msgs SetSystemVirtualDramAddr and DramLogSetDramAddr to
         * notify the SMU of the pool location.
         */
        ret = smu_notify_memory_pool_location(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
                return ret;
        }

        /* smu_dump_pptable(smu); */
        /*
         * Copy the pptable bo in vram to the smc with SMU MSGs such as
         * SetDriverDramAddr and TransferTableDram2Smu.
         */
        ret = smu_write_pptable(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
                return ret;
        }

        /* issue Run*Btc msg */
        ret = smu_run_btc(smu);
        if (ret)
                return ret;

        ret = smu_feature_set_allowed_mask(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
                return ret;
        }

        ret = smu_system_features_control(smu, true);
        if (ret) {
                dev_err(adev->dev, "Failed to enable requested dpm features!\n");
                return ret;
        }

        if (!smu_is_dpm_running(smu))
                dev_info(adev->dev, "dpm has been disabled\n");

        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
                pcie_gen = 2;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
                pcie_gen = 1;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
                pcie_gen = 0;

        /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
         * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
         * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
         */
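        /*
         * Worked example (editorial, illustrative only): on a Gen4-capable
         * x16 link, the chains above and below select pcie_gen = 3 and
         * pcie_width = 6, which the ppt backend packs into the bit layout
         * described above before sending it to the SMU.
         */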
        if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
                pcie_width = 6;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
                pcie_width = 5;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
                pcie_width = 4;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
                pcie_width = 3;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
                pcie_width = 2;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
                pcie_width = 1;
        ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
        if (ret) {
                dev_err(adev->dev, "Attempt to override pcie params failed!\n");
                return ret;
        }

        ret = smu_get_thermal_temperature_range(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
                return ret;
        }

        ret = smu_enable_thermal_alert(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to enable thermal alert!\n");
                return ret;
        }

        /*
         * Set initialized values (retrieved from vbios) in the dpm tables
         * context, such as gfxclk, memclk, dcefclk, etc. And enable the
         * DPM feature for each type of clks.
         */
        ret = smu_set_default_dpm_table(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
                return ret;
        }

        ret = smu_notify_display_change(smu);
        if (ret)
                return ret;

        /*
         * Set min deep sleep dce fclk with bootup value from vbios via
         * SetMinDeepSleepDcefclk MSG.
         */
        ret = smu_set_min_dcef_deep_sleep(smu,
                        smu->smu_table.boot_values.dcefclk / 100);
        if (ret)
                return ret;

        return ret;
}

1048 | static int smu_start_smc_engine(struct smu_context *smu) | |||
1049 | { | |||
1050 | struct amdgpu_device *adev = smu->adev; | |||
1051 | int ret = 0; | |||
1052 | ||||
1053 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { | |||
1054 | if (adev->asic_type < CHIP_NAVI10) { | |||
1055 | if (smu->ppt_funcs->load_microcode) { | |||
1056 | ret = smu->ppt_funcs->load_microcode(smu); | |||
1057 | if (ret) | |||
1058 | return ret; | |||
1059 | } | |||
1060 | } | |||
1061 | } | |||
1062 | ||||
1063 | if (smu->ppt_funcs->check_fw_status) { | |||
1064 | ret = smu->ppt_funcs->check_fw_status(smu); | |||
1065 | if (ret) { | |||
1066 | dev_err(adev->dev, "SMC is not ready\n")printf("drm:pid%d:%s *ERROR* " "SMC is not ready\n", ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci ) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci; })->ci_curproc->p_p->ps_pid, __func__); | |||
1067 | return ret; | |||
1068 | } | |||
1069 | } | |||
1070 | ||||
1071 | /* | |||
1072 | * Send msg GetDriverIfVersion to check if the return value is equal | |||
1073 | * with DRIVER_IF_VERSION of smc header. | |||
1074 | */ | |||
1075 | ret = smu_check_fw_version(smu)((smu)->ppt_funcs ? ((smu)->ppt_funcs->check_fw_version ? (smu)->ppt_funcs->check_fw_version(smu) : 0) : -22); | |||
1076 | if (ret) | |||
1077 | return ret; | |||
1078 | ||||
1079 | return ret; | |||
1080 | } | |||
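
/*
 * Usage note (illustrative): smu_start_smc_engine() is the common
 * bring-up step shared by smu_hw_init() and smu_resume() below. On
 * ASICs whose SMC firmware is loaded by the PSP (AMDGPU_FW_LOAD_PSP)
 * the direct load_microcode path is skipped and only the firmware
 * status and driver interface version are validated.
 */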

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_setup_pptable(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable!\n");
		return ret;
	}

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieval here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}
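
/*
 * Bring-up order in smu_hw_init() above, for reference: SMC engine
 * start, APU-only power gates, vbios boot values, pptable, driver
 * allowed feature mask, smc hw setup, then max sustainable clocks.
 * adev->pm.dpm_enabled is only flipped to true once every step has
 * succeeded, so partially initialized state is never exposed to the
 * rest of the driver.
 */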

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disablement process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization would be
	 *     needed to reenable them. That would cost much more
	 *     effort.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table &&
	    (adev->asic_type >= CHIP_NAVI10) &&
	    (adev->asic_type <= CHIP_NAVY_FLOUNDER))
		return 0;

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement
	 * properly on BACO in. Driver involvement is unnecessary.
	 */
	if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
	     use_baco)
		return 0;

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * the BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		ret = smu_system_features_control(smu, false);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features.\n");
	}

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}
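
/*
 * Summary of the use_baco predicate above, for reference: BACO-aware
 * teardown is chosen only on dGPUs (!is_apu), and only when either a
 * GPU reset is in flight with BACO as the reset method, or the device
 * is entering runtime PM or hibernation on an ASIC that reports BACO
 * support. In every other case all SMU features are simply disabled.
 */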

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_dpm_set_vcn_enable(smu, false);
		smu_dpm_set_jpeg_enable(smu, false);
	}

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
}

int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	amdgpu_gfx_off_ctrl(smu->adev, false);

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	amdgpu_gfx_off_ctrl(smu->adev, true);

	return 0;
}
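
/*
 * Note on smu_reset() above: GFXOFF is explicitly disallowed across the
 * whole hw_fini/hw_init/late_init cycle and only re-allowed once the
 * SMU is fully back up, so the reset sequence never races with the GFX
 * block dropping into its low-power state.
 */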

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
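
/*
 * Note for callers of smu_get_clock_info(): both smu_get_perf_level()
 * passes fill in the same three min_* members of *clk_info, and the
 * max_* members are never written here. A caller that goes on to read
 * the max_* fields must therefore zero-initialize the structure first,
 * e.g. (sketch):
 *
 *	struct smu_clock_info hw_clocks = {0};
 *
 *	ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
 */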

int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	/* zeroed: smu_get_clock_info() only fills in the min_* members */
	struct smu_clock_info hw_clocks = {0};
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		dev_err(smu->adev->dev, "Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
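
/*
 * The enter/exit paths above are deliberately mirror images: entering a
 * profiling pstate ungates power gating, then clock gating, then shuts
 * off ULV and deep sleep; leaving re-enables deep sleep and ULV first
 * and gates clock gating, then power gating, last, restoring the saved
 * DPM level when AMD_DPM_FORCED_LEVEL_PROFILE_EXIT is requested.
 */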

static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
	}

	return ret;
}

int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		    bool lock_needed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
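
/*
 * Usage sketch (illustrative): a caller that already holds smu->mutex,
 * such as smu_force_performance_level() below, passes lock_needed ==
 * false; external callers ask for the lock to be taken on their behalf:
 *
 *	ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
 *			      AMD_PP_TASK_READJUST_POWER_STATE, true);
 */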

int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);

	mutex_unlock(&smu->mutex);

	return 0;
}
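
/*
 * Worked example for the mask handling above (values assumed for
 * illustration): with workload_mask == 0b0101, fls() returns 3, so the
 * highest-priority requested profile, workload_setting[2], wins. Once
 * the mask drops to zero, fls() returns 0 and the code falls back to
 * workload_setting[0], the default profile.
 */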

enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}

int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	mutex_unlock(&smu->mutex);

	return ret;
}
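
/*
 * Illustrative note on the mask argument above: each set bit selects
 * one DPM level of the given clock to keep enabled, so a mask of
 * (1 << 2), for instance, would pin the clock to level 2 only. The
 * exact interpretation is up to the per-ASIC force_clk_levels
 * implementation.
 */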

/*
 * On system suspend or reset, the dpm_enabled flag is cleared, so
 * that SMU services which are not supported in those states get
 * gated. Setting the mp1 state, however, should still be allowed
 * even with dpm_enabled cleared.
 */
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		mutex_unlock(&smu->mutex);
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg, NULL);
	/* some asics may not support those messages */
	if (ret == -EINVAL)
		ret = 0;
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
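
/*
 * The mapping above is one message per target state: SHUTDOWN, UNLOAD
 * and RESET each notify the MP1 firmware of the impending transition,
 * while PP_MP1_STATE_NONE is a no-op. -EINVAL from the SMC is
 * deliberately swallowed because some ASICs do not implement these
 * messages.
 */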

int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, NULL);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
					struct pp_smu_wm_range_sets *clock_ranges)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->disable_watermark)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu_set_watermarks_table(smu, clock_ranges);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	mutex_lock(&smu->mutex);
	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
			smu->adev->pm.ac_power ? "AC" : "DC");
	mutex_unlock(&smu->mutex);

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
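
/*
 * Registration sketch (illustrative): the per-ASIC setup code in amdgpu
 * picks the matching version when assembling the IP block list, along
 * the lines of
 *
 *	amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 *
 * after which amdgpu's IP walker drives hw_init/suspend/resume/hw_fini
 * through smu_ip_funcs above.
 */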

int smu_load_microcode(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_power_limit(struct smu_context *smu,
			uint32_t *limit,
			bool max_setting)
{
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	*limit = (max_setting ? smu->max_power_limit : smu->current_power_limit);

	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		ret = -EINVAL;
		goto out;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);

out:
	mutex_unlock(&smu->mutex);

	return ret;
}
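
/*
 * Semantics of smu_set_power_limit() above, for reference: a limit
 * above smu->max_power_limit is rejected with -EINVAL, and a limit of
 * 0 is treated as "reapply the current limit", so e.g.
 *
 *	ret = smu_set_power_limit(smu, 0);
 *
 * simply rewrites smu->current_power_limit to the firmware.
 */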

int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table) {
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
		if (!ret && (type == PP_OD_COMMIT_DPM_TABLE))
			ret = smu_handle_task(smu,
					      smu->smu_dpm.dpm_level,
					      AMD_PP_TASK_READJUST_POWER_STATE,
					      false);
	}

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
{
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	mutex_unlock(&smu->mutex);

	return ret;
}
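
/*
 * Example read (illustrative): querying whether VCN is powered follows
 * the generic pattern, with *size covering the 4-byte result:
 *
 *	uint32_t value;
 *	uint32_t size = sizeof(value);
 *
 *	ret = smu_read_sensor(smu, AMDGPU_PP_SENSOR_VCN_POWER_STATE,
 *			      &value, &size);
 *
 * The per-ASIC read_sensor callback gets first refusal; the switch
 * above only serves the sensors that callback does not handle.
 */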
2156 | ||||
2157 | int smu_get_power_profile_mode(struct smu_context *smu, char *buf) | |||
2158 | { | |||
2159 | int ret = 0; | |||
2160 | ||||
2161 | if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) | |||
2162 | return -EOPNOTSUPP45; | |||
2163 | ||||
2164 | mutex_lock(&smu->mutex)rw_enter_write(&smu->mutex); | |||
2165 | ||||
2166 | if (smu->ppt_funcs->get_power_profile_mode) | |||
2167 | ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); | |||
2168 | ||||
2169 | mutex_unlock(&smu->mutex)rw_exit_write(&smu->mutex); | |||
2170 | ||||
2171 | return ret; | |||
2172 | } | |||
2173 | ||||
2174 | int smu_set_power_profile_mode(struct smu_context *smu, | |||
2175 | long *param, | |||
2176 | uint32_t param_size, | |||
2177 | bool_Bool lock_needed) | |||
2178 | { | |||
2179 | int ret = 0; | |||
2180 | ||||
2181 | if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) | |||
2182 | return -EOPNOTSUPP45; | |||
2183 | ||||
2184 | if (lock_needed) | |||
2185 | mutex_lock(&smu->mutex)rw_enter_write(&smu->mutex); | |||
2186 | ||||
2187 | if (smu->ppt_funcs->set_power_profile_mode) | |||
2188 | ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); | |||
2189 | ||||
2190 | if (lock_needed) | |||
2191 | mutex_unlock(&smu->mutex)rw_exit_write(&smu->mutex); | |||
2192 | ||||
2193 | return ret; | |||
2194 | } | |||
2195 | ||||
2196 | ||||
2197 | int smu_get_fan_control_mode(struct smu_context *smu) | |||
2198 | { | |||
2199 | int ret = 0; | |||
2200 | ||||
2201 | if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) | |||
2202 | return -EOPNOTSUPP45; | |||
2203 | ||||
2204 | mutex_lock(&smu->mutex)rw_enter_write(&smu->mutex); | |||
2205 | ||||
2206 | if (smu->ppt_funcs->get_fan_control_mode) | |||
2207 | ret = smu->ppt_funcs->get_fan_control_mode(smu); | |||
2208 | ||||
2209 | mutex_unlock(&smu->mutex)rw_exit_write(&smu->mutex); | |||
2210 | ||||
2211 | return ret; | |||
2212 | } | |||
2213 | ||||
2214 | int smu_set_fan_control_mode(struct smu_context *smu, int value) | |||
2215 | { | |||
2216 | int ret = 0; | |||
2217 | ||||
2218 | if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) | |||
2219 | return -EOPNOTSUPP45; | |||
2220 | ||||
2221 | mutex_lock(&smu->mutex)rw_enter_write(&smu->mutex); | |||
2222 | ||||
2223 | if (smu->ppt_funcs->set_fan_control_mode) | |||
2224 | ret = smu->ppt_funcs->set_fan_control_mode(smu, value); | |||
2225 | ||||
2226 | mutex_unlock(&smu->mutex)rw_exit_write(&smu->mutex); | |||
2227 | ||||
2228 | return ret; | |||
2229 | } | |||
2230 | ||||
2231 | int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) | |||
2232 | { | |||
2233 | int ret = 0; | |||
2234 | uint32_t percent; | |||
2235 | uint32_t current_rpm; | |||
2236 | ||||
2237 | if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) | |||
2238 | return -EOPNOTSUPP45; | |||
2239 | ||||
2240 | mutex_lock(&smu->mutex)rw_enter_write(&smu->mutex); | |||
2241 | ||||
2242 | if (smu->ppt_funcs->get_fan_speed_rpm) { | |||
2243 | ret = smu->ppt_funcs->get_fan_speed_rpm(smu, ¤t_rpm); | |||
2244 | if (!ret) { | |||
2245 | percent = current_rpm * 100 / smu->fan_max_rpm; | |||
2246 | *speed = percent > 100 ? 100 : percent; | |||
2247 | } | |||
2248 | } | |||
2249 | ||||
2250 | mutex_unlock(&smu->mutex)rw_exit_write(&smu->mutex); | |||
2251 | ||||
2252 | ||||
2253 | return ret; | |||
2254 | } | |||
2255 | ||||
2256 | int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) | |||
2257 | { | |||
2258 | int ret = 0; | |||
2259 | ||||
2260 | if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) | |||
2261 | return -EOPNOTSUPP45; | |||
2262 | ||||
2263 | mutex_lock(&smu->mutex)rw_enter_write(&smu->mutex); | |||
2264 | ||||
2265 | if (smu->ppt_funcs->set_fan_speed_percent) | |||
2266 | ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed); | |||
2267 | ||||
2268 | mutex_unlock(&smu->mutex)rw_exit_write(&smu->mutex); | |||
2269 | ||||
2270 | return ret; | |||
2271 | } | |||
2272 | ||||
2273 | int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed) | |||
2274 | { | |||
2275 | int ret = 0; | |||
2276 | ||||
2277 | if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) | |||
2278 | return -EOPNOTSUPP45; | |||
2279 | ||||
2280 | mutex_lock(&smu->mutex)rw_enter_write(&smu->mutex); | |||
2281 | ||||
2282 | if (smu->ppt_funcs->get_fan_speed_rpm) | |||
2283 | ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); | |||
2284 | ||||
2285 | mutex_unlock(&smu->mutex)rw_exit_write(&smu->mutex); | |||
2286 | ||||
2287 | return ret; | |||
2288 | } | |||
2289 | ||||
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	ret = smu_set_min_dcef_deep_sleep(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type)
		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_high_clocks)
		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

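/*
 * The display_* entry points below service clock/voltage requests
 * from the display side; the exact call sites live outside this
 * file.
 */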
int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->notify_smu_enable_pwe)
		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

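/*
 * XGMI is the inter-GPU link used in multi-GPU hives; pstate selects
 * the link performance state. Note that the failure is logged only
 * after the lock has been dropped.
 */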
int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

/*
 * On system suspend or reset, the dpm_enabled flag is cleared so
 * that SMU services which are no longer supported are gated off.
 *
 * However, BACO and mode1 reset requests must still be granted, as
 * they remain supported and necessary in those states.
 */
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

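/*
 * Unlike most wrappers in this file, smu_baco_get_state() must
 * dereference the callback's return value, so a missing callback is
 * a hard -EINVAL rather than a silent no-op.
 */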
int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to enter BACO state!\n");

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Failed to exit BACO state!\n");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}

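/*
 * Note: when power management is disabled the -EOPNOTSUPP below is
 * funneled through the enum return type; callers presumably check
 * for negative values before treating the result as a state.
 */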
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}

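/*
 * On success *table is pointed at a metrics buffer owned by the
 * backend (the caller does not free it) and the table size is
 * returned; otherwise a negative errno comes back. Unlike the
 * wrappers above, the callback is checked before taking the lock.
 */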
ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
				void **table)
{
	ssize_t size;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	size = smu->ppt_funcs->get_gpu_metrics(smu, table);

	mutex_unlock(&smu->mutex);

	return size;
}

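/*
 * MGPU fan boost raises the fan profile when several GPUs share an
 * enclosure; whether the board qualifies is left to the backend
 * callback.
 */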
int smu_enable_mgpu_fan_boost(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}