File: dev/pci/drm/amd/amdgpu/df_v3_6.c
Warning: line 418, column 21: The left operand of '==' is a garbage value
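
Analysis note: the report points into df_v3_6_pmc_get_ctrl_settings (line 418). Its callers df_v3_6_pmc_start (line 517) and df_v3_6_pmc_stop (line 559) pass lo_base_addr/hi_base_addr to it uninitialized, and df_v3_6_pmc_get_addr returns without writing its out-parameters when the counter is not assigned (line 364), so the "== 0" test at line 418 can read uninitialized stack. A minimal sketch of one possible fix, mirroring the zero-initialization this file already uses in df_v3_6_reset_perfmon_cntr (line 502) and df_v3_6_pmc_get_count (line 598):

        /* in df_v3_6_pmc_start/df_v3_6_pmc_stop: start from known-zero
         * addresses so the sanity check in get_ctrl_settings is well defined */
        uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0;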
1 | /*
2 |  * Copyright 2018 Advanced Micro Devices, Inc.
3 |  *
4 |  * Permission is hereby granted, free of charge, to any person obtaining a
5 |  * copy of this software and associated documentation files (the "Software"),
6 |  * to deal in the Software without restriction, including without limitation
7 |  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 |  * and/or sell copies of the Software, and to permit persons to whom the
9 |  * Software is furnished to do so, subject to the following conditions:
10 |  *
11 |  * The above copyright notice and this permission notice shall be included in
12 |  * all copies or substantial portions of the Software.
13 |  *
14 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 |  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 |  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 |  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 |  * OTHER DEALINGS IN THE SOFTWARE.
21 |  *
22 |  */
23 | #include "amdgpu.h"
24 | #include "df_v3_6.h"
25 |
26 | #include "df/df_3_6_default.h"
27 | #include "df/df_3_6_offset.h"
28 | #include "df/df_3_6_sh_mask.h"
29 |
30 | #define DF_3_6_SMN_REG_INST_DIST        0x8
31 | #define DF_3_6_INST_CNT                 8
32 |
33 | /* Defined in global_features.h as FTI_PERFMON_VISIBLE */
34 | #define DF_V3_6_MAX_COUNTERS            4
35 |
36 | /* get flags from df perfmon config */
37 | #define DF_V3_6_GET_EVENT(x)            (x & 0xFFUL)
38 | #define DF_V3_6_GET_INSTANCE(x)         ((x >> 8) & 0xFFUL)
39 | #define DF_V3_6_GET_UNITMASK(x)         ((x >> 16) & 0xFFUL)
40 | #define DF_V3_6_PERFMON_OVERFLOW        0xFFFFFFFFFFFFULL
41 |
42 | static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
43 |                                        16, 32, 0, 0, 0, 2, 4, 8};
44 |
45 | static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
46 |                                  uint32_t ficaa_val)
47 | {
48 |         unsigned long flags, address, data;
49 |         uint32_t ficadl_val, ficadh_val;
50 |
51 |         address = adev->nbio.funcs->get_pcie_index_offset(adev);
52 |         data = adev->nbio.funcs->get_pcie_data_offset(adev);
53 |
54 |         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
55 |         WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
56 |         WREG32(data, ficaa_val);
57 |
58 |         WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
59 |         ficadl_val = RREG32(data);
60 |
61 |         WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
62 |         ficadh_val = RREG32(data);
63 |
64 |         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
65 |
66 |         return (((ficadh_val & 0xFFFFFFFFFFFFFFFF) << 32) | ficadl_val);
67 | }
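
Aside: the 0xFFFFFFFFFFFFFFFF mask on line 66 only promotes the 32-bit ficadh_val to 64 bits before the shift; an equivalent and perhaps clearer spelling would be:

        return ((uint64_t)ficadh_val << 32) | ficadl_val;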
68 |
69 | static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
70 |                              uint32_t ficadl_val, uint32_t ficadh_val)
71 | {
72 |         unsigned long flags, address, data;
73 |
74 |         address = adev->nbio.funcs->get_pcie_index_offset(adev);
75 |         data = adev->nbio.funcs->get_pcie_data_offset(adev);
76 |
77 |         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
78 |         WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
79 |         WREG32(data, ficaa_val);
80 |
81 |         WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
82 |         WREG32(data, ficadl_val);
83 |
84 |         WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
85 |         WREG32(data, ficadh_val);
86 |
87 |         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
88 | }
89 |
90 | /*
91 |  * df_v3_6_perfmon_rreg - read perfmon lo and hi
92 |  *
93 |  * Must be atomic: no 64-bit MMIO method is provided, so the lo and hi reads
94 |  * are done back to back under one lock to preserve the df finite state machine.
95 |  */
96 | static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
97 |                                  uint32_t lo_addr, uint32_t *lo_val,
98 |                                  uint32_t hi_addr, uint32_t *hi_val)
99 | {
100 |         unsigned long flags, address, data;
101 |
102 |         address = adev->nbio.funcs->get_pcie_index_offset(adev);
103 |         data = adev->nbio.funcs->get_pcie_data_offset(adev);
104 |
105 |         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
106 |         WREG32(address, lo_addr);
107 |         *lo_val = RREG32(data);
108 |         WREG32(address, hi_addr);
109 |         *hi_val = RREG32(data);
110 |         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
111 | }
112 |
113 | /*
114 |  * df_v3_6_perfmon_wreg - write to perfmon lo and hi
115 |  *
116 |  * Must be atomic: no 64-bit MMIO method is provided, so no reads may occur
117 |  * between the data writes, preserving the data fabric's finite state machine.
118 |  */
119 | static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
120 |                                  uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
121 | {
122 |         unsigned long flags, address, data;
123 |
124 |         address = adev->nbio.funcs->get_pcie_index_offset(adev);
125 |         data = adev->nbio.funcs->get_pcie_data_offset(adev);
126 |
127 |         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
128 |         WREG32(address, lo_addr);
129 |         WREG32(data, lo_val);
130 |         WREG32(address, hi_addr);
131 |         WREG32(data, hi_val);
132 |         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
133 | }
134 |
135 | /* same as perfmon_wreg but return status on write value check */
136 | static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
137 |                                            uint32_t lo_addr, uint32_t lo_val,
138 |                                            uint32_t hi_addr, uint32_t hi_val)
139 | {
140 |         unsigned long flags, address, data;
141 |         uint32_t lo_val_rb, hi_val_rb;
142 |
143 |         address = adev->nbio.funcs->get_pcie_index_offset(adev);
144 |         data = adev->nbio.funcs->get_pcie_data_offset(adev);
145 |
146 |         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
147 |         WREG32(address, lo_addr);
148 |         WREG32(data, lo_val);
149 |         WREG32(address, hi_addr);
150 |         WREG32(data, hi_val);
151 |
152 |         WREG32(address, lo_addr);
153 |         lo_val_rb = RREG32(data);
154 |         WREG32(address, hi_addr);
155 |         hi_val_rb = RREG32(data);
156 |         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
157 |
158 |         if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
159 |                 return -EBUSY;
160 |
161 |         return 0;
162 | }
163 |
164 |
165 | /*
166 |  * retry arming counters every 100 usecs within 1 millisecond interval.
167 |  * if retry fails after time out, return error.
168 |  */
169 | #define ARM_RETRY_USEC_TIMEOUT  1000
170 | #define ARM_RETRY_USEC_INTERVAL 100
171 | static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
172 |                                           uint32_t lo_addr, uint32_t lo_val,
173 |                                           uint32_t hi_addr, uint32_t hi_val)
174 | {
175 |         int countdown = ARM_RETRY_USEC_TIMEOUT;
176 |
177 |         while (countdown) {
178 |
179 |                 if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
180 |                                                      hi_addr, hi_val))
181 |                         break;
182 |
183 |                 countdown -= ARM_RETRY_USEC_INTERVAL;
184 |                 udelay(ARM_RETRY_USEC_INTERVAL);
185 |         }
186 |
187 |         return countdown > 0 ? 0 : -ETIME;
188 | }
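
Note: with a 1000 usec budget consumed in 100 usec steps, arming is attempted at most 10 times before -ETIME is returned. A usage sketch of the retry-then-defer pattern (the same shape df_v3_6_pmc_start uses below):

        /* try to arm the control registers; on timeout, mark the counter
         * deferred so a later read can retry the arming */
        err = df_v3_6_perfmon_arm_with_retry(adev, lo_base_addr, lo_val,
                                             hi_base_addr, hi_val);
        if (err)
                ret = df_v3_6_pmc_set_deferred(adev, config, counter_idx, true);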
189 |
190 | /* get the number of df counters available */
191 | static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
192 |                                          struct device_attribute *attr,
193 |                                          char *buf)
194 | {
195 |         struct amdgpu_device *adev;
196 |         struct drm_device *ddev;
197 |         int i, count;
198 |
199 |         ddev = dev_get_drvdata(dev);
200 |         adev = drm_to_adev(ddev);
201 |         count = 0;
202 |
203 |         for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
204 |                 if (adev->df_perfmon_config_assign_mask[i] == 0)
205 |                         count++;
206 |         }
207 |
208 |         return sysfs_emit(buf, "%i\n", count);
209 | }
210 |
211 | /* device attr for available perfmon counters */
212 | static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);
213 |
214 | static void df_v3_6_query_hashes(struct amdgpu_device *adev)
215 | {
216 |         u32 tmp;
217 |
218 |         adev->df.hash_status.hash_64k = false;
219 |         adev->df.hash_status.hash_2m = false;
220 |         adev->df.hash_status.hash_1g = false;
221 |
222 |         /* encoding for hash-enabled on Arcturus and Aldebaran */
223 |         if ((adev->asic_type == CHIP_ARCTURUS &&
224 |              adev->df.funcs->get_fb_channel_number(adev) == 0xe) ||
225 |             (adev->asic_type == CHIP_ALDEBARAN &&
226 |              adev->df.funcs->get_fb_channel_number(adev) == 0x1e)) {
227 |                 tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl);
228 |                 adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp,
229 |                                                 DF_CS_UMC_AON0_DfGlobalCtrl,
230 |                                                 GlbHashIntlvCtl64K);
231 |                 adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp,
232 |                                                 DF_CS_UMC_AON0_DfGlobalCtrl,
233 |                                                 GlbHashIntlvCtl2M);
234 |                 adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp,
235 |                                                 DF_CS_UMC_AON0_DfGlobalCtrl,
236 |                                                 GlbHashIntlvCtl1G);
237 |         }
238 | }
239 |
240 | /* init perfmons */
241 | static void df_v3_6_sw_init(struct amdgpu_device *adev)
242 | {
243 |         int i, ret;
244 |
245 |         ret = device_create_file(adev->dev, &dev_attr_df_cntr_avail);
246 |         if (ret)
247 |                 DRM_ERROR("failed to create file for available df counters\n");
248 |
249 |         for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
250 |                 adev->df_perfmon_config_assign_mask[i] = 0;
251 |
252 |         df_v3_6_query_hashes(adev);
253 | }
254 |
255 | static void df_v3_6_sw_fini(struct amdgpu_device *adev)
256 | {
257 |
258 |         device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
259 |
260 | }
261 |
262 | static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
263 |                                           bool enable)
264 | {
265 |         u32 tmp;
266 |
267 |         if (enable) {
268 |                 tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
269 |                 tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
270 |                 WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
271 |         } else
272 |                 WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
273 |                              mmFabricConfigAccessControl_DEFAULT);
274 | }
275 |
276 | static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
277 | {
278 |         u32 tmp;
279 |
280 |         if (adev->asic_type == CHIP_ALDEBARAN) {
281 |                 tmp = RREG32_SOC15(DF, 0, mmDF_GCM_AON0_DramMegaBaseAddress0);
282 |                 tmp &=
283 |                 ALDEBARAN_DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
284 |         } else {
285 |                 tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
286 |                 tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
287 |         }
288 |         tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
289 |
290 |         return tmp;
291 | }
292 |
293 | static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
294 | {
295 |         int fb_channel_number;
296 |
297 |         fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
298 |         if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
299 |                 fb_channel_number = 0;
300 |
301 |         return df_v3_6_channel_number[fb_channel_number];
302 | }
303 |
304 | static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
305 |                                                      bool enable)
306 | {
307 |         u32 tmp;
308 |
309 |         if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
310 |                 /* Put DF on broadcast mode */
311 |                 adev->df.funcs->enable_broadcast_mode(adev, true);
312 |
313 |                 if (enable) {
314 |                         tmp = RREG32_SOC15(DF, 0,
315 |                                         mmDF_PIE_AON0_DfGlobalClkGater);
316 |                         tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
317 |                         tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
318 |                         WREG32_SOC15(DF, 0,
319 |                                      mmDF_PIE_AON0_DfGlobalClkGater, tmp);
320 |                 } else {
321 |                         tmp = RREG32_SOC15(DF, 0,
322 |                                         mmDF_PIE_AON0_DfGlobalClkGater);
323 |                         tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
324 |                         tmp |= DF_V3_6_MGCG_DISABLE;
325 |                         WREG32_SOC15(DF, 0,
326 |                                      mmDF_PIE_AON0_DfGlobalClkGater, tmp);
327 |                 }
328 |
329 |                 /* Exit broadcast mode */
330 |                 adev->df.funcs->enable_broadcast_mode(adev, false);
331 |         }
332 | }
333 |
334 | static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
335 |                                           u64 *flags)
336 | {
337 |         u32 tmp;
338 |
339 |         /* AMD_CG_SUPPORT_DF_MGCG */
340 |         tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
341 |         if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
342 |                 *flags |= AMD_CG_SUPPORT_DF_MGCG;
343 | }
344 |
345 | /* check whether the df perfmon counter at counter_idx is assigned to config */
346 | static bool df_v3_6_pmc_has_counter(struct amdgpu_device *adev,
347 |                                     uint64_t config,
348 |                                     int counter_idx)
349 | {
350 |
351 |         return ((config & 0x0FFFFFFUL) ==
352 |                         adev->df_perfmon_config_assign_mask[counter_idx]);
353 |
354 | }
355 |
356 | /* get address based on counter assignment */
357 | static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
358 |                                  uint64_t config,
359 |                                  int counter_idx,
360 |                                  int is_ctrl,
361 |                                  uint32_t *lo_base_addr,
362 |                                  uint32_t *hi_base_addr)
363 | {
364 |         if (!df_v3_6_pmc_has_counter(adev, config, counter_idx))
365 |                 return;
366 |
367 |         switch (counter_idx) {
368 |
369 |         case 0:
370 |                 *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
371 |                 *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
372 |                 break;
373 |         case 1:
374 |                 *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
375 |                 *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
376 |                 break;
377 |         case 2:
378 |                 *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
379 |                 *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
380 |                 break;
381 |         case 3:
382 |                 *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
383 |                 *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
384 |                 break;
385 |
386 |         }
387 |
388 | }
389 |
390 | /* get read counter address */
391 | static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
392 |                                           uint64_t config,
393 |                                           int counter_idx,
394 |                                           uint32_t *lo_base_addr,
395 |                                           uint32_t *hi_base_addr)
396 | {
397 |         df_v3_6_pmc_get_addr(adev, config, counter_idx, 0, lo_base_addr,
398 |                              hi_base_addr);
399 | }
400 |
401 | /* get control counter settings i.e. address and values to set */
402 | static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
403 |                                          uint64_t config,
404 |                                          int counter_idx,
405 |                                          uint32_t *lo_base_addr,
406 |                                          uint32_t *hi_base_addr,
407 |                                          uint32_t *lo_val,
408 |                                          uint32_t *hi_val,
409 |                                          bool is_enable)
410 | {
411 |
412 |         uint32_t eventsel, instance, unitmask;
413 |         uint32_t instance_10, instance_5432, instance_76;
414 |
415 |         df_v3_6_pmc_get_addr(adev, config, counter_idx, 1, lo_base_addr,
416 |                              hi_base_addr);
417 |
418 |         if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
    |              ^~~~~~~~~~~~~ warning: The left operand of '==' is a garbage value
419 |                 DRM_ERROR("[DF PMC] addressing not retrieved! Lo: %x, Hi: %x",
420 |                           *lo_base_addr, *hi_base_addr);
421 |                 return -ENXIO;
422 |         }
423 |
424 |         eventsel = DF_V3_6_GET_EVENT(config) & 0x3f;
425 |         unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf;
426 |         instance = DF_V3_6_GET_INSTANCE(config);
427 |
428 |         instance_10 = instance & 0x3;
429 |         instance_5432 = (instance >> 2) & 0xf;
430 |         instance_76 = (instance >> 6) & 0x3;
431 |
432 |         *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel;
433 |         *lo_val = is_enable ? *lo_val | (1 << 22) : *lo_val & ~(1 << 22);
434 |         *hi_val = (instance_76 << 29) | instance_5432;
435 |
436 |         DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
437 |                          config, *lo_base_addr, *hi_base_addr, *lo_val, *hi_val);
438 |
439 |         return 0;
440 | }
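
Worked example of the config decoding above, using a hypothetical config value 0x0F4107 (unitmask 0xF, instance 0x41, event 0x07):

        /* eventsel = 0x07 & 0x3f = 0x07
         * unitmask = 0x0F & 0xf  = 0x0F
         * instance = 0x41 -> instance_10 = 0x1, instance_5432 = 0x0,
         *                    instance_76 = 0x1
         * lo_val   = (0xF << 8) | (0x1 << 6) | 0x07 = 0x00000F47
         *            (| (1 << 22) when is_enable    = 0x00400F47)
         * hi_val   = (0x1 << 29) | 0x0              = 0x20000000
         */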
441 |
442 | /* add df performance counters for read */
443 | static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
444 |                                 uint64_t config)
445 | {
446 |         int i;
447 |
448 |         for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
449 |                 if (adev->df_perfmon_config_assign_mask[i] == 0U) {
450 |                         adev->df_perfmon_config_assign_mask[i] =
451 |                                                         config & 0x0FFFFFFUL;
452 |                         return i;
453 |                 }
454 |         }
455 |
456 |         return -ENOSPC;
457 | }
458 |
459 | #define DEFERRED_ARM_MASK       (1 << 31)
460 | static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
461 |                                     uint64_t config, int counter_idx,
462 |                                     bool is_deferred)
463 | {
464 |
465 |         if (!df_v3_6_pmc_has_counter(adev, config, counter_idx))
466 |                 return -EINVAL;
467 |
468 |         if (is_deferred)
469 |                 adev->df_perfmon_config_assign_mask[counter_idx] |=
470 |                                                         DEFERRED_ARM_MASK;
471 |         else
472 |                 adev->df_perfmon_config_assign_mask[counter_idx] &=
473 |                                                         ~DEFERRED_ARM_MASK;
474 |
475 |         return 0;
476 | }
477 |
478 | static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
479 |                                     uint64_t config,
480 |                                     int counter_idx)
481 | {
482 |         return (df_v3_6_pmc_has_counter(adev, config, counter_idx) &&
483 |                         (adev->df_perfmon_config_assign_mask[counter_idx]
484 |                                 & DEFERRED_ARM_MASK));
485 |
486 | }
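
Note: the deferred flag lives in bit 31 of the same assign-mask slot, above the 24-bit config field (0x0FFFFFF) stored by df_v3_6_pmc_add_cntr. As written, though, the equality test in df_v3_6_pmc_has_counter (line 351) compares the whole slot, so it only matches while the flag is clear. If the intent is for the flag not to break the match, a masked compare would be needed; a sketch of that assumption, not the code as written:

        /* compare only the config field, ignoring the deferred-arm flag */
        return ((config & 0x0FFFFFFUL) ==
                (adev->df_perfmon_config_assign_mask[counter_idx] &
                 0x0FFFFFFUL));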
487 |
488 | /* release performance counter */
489 | static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
490 |                                      uint64_t config,
491 |                                      int counter_idx)
492 | {
493 |         if (df_v3_6_pmc_has_counter(adev, config, counter_idx))
494 |                 adev->df_perfmon_config_assign_mask[counter_idx] = 0ULL;
495 | }
496 |
497 |
498 | static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
499 |                                        uint64_t config,
500 |                                        int counter_idx)
501 | {
502 |         uint32_t lo_base_addr = 0, hi_base_addr = 0;
503 |
504 |         df_v3_6_pmc_get_read_settings(adev, config, counter_idx, &lo_base_addr,
505 |                                       &hi_base_addr);
506 |
507 |         if ((lo_base_addr == 0) || (hi_base_addr == 0))
508 |                 return;
509 |
510 |         df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
511 | }
512 |
513 | /* return a new counter index if is_add == 1, otherwise arm and return status */
514 | static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
515 |                              int counter_idx, int is_add)
516 | {
517 |         uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
518 |         int err = 0, ret = 0;
519 |
520 |         switch (adev->asic_type) {
521 |         case CHIP_VEGA20:
522 |         case CHIP_ARCTURUS:
523 |                 if (is_add)
524 |                         return df_v3_6_pmc_add_cntr(adev, config);
525 |
526 |                 ret = df_v3_6_pmc_get_ctrl_settings(adev,
527 |                                         config,
528 |                                         counter_idx,
529 |                                         &lo_base_addr,
530 |                                         &hi_base_addr,
531 |                                         &lo_val,
532 |                                         &hi_val,
533 |                                         true);
534 |
535 |                 if (ret)
536 |                         return ret;
537 |
538 |                 err = df_v3_6_perfmon_arm_with_retry(adev,
539 |                                                      lo_base_addr,
540 |                                                      lo_val,
541 |                                                      hi_base_addr,
542 |                                                      hi_val);
543 |
544 |                 if (err)
545 |                         ret = df_v3_6_pmc_set_deferred(adev, config,
546 |                                                        counter_idx, true);
547 |
548 |                 break;
549 |         default:
550 |                 break;
551 |         }
552 |
553 |         return ret;
554 | }
555 |
556 | static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
557 |                             int counter_idx, int is_remove)
558 | {
559 |         uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
560 |         int ret = 0;
561 |
562 |         switch (adev->asic_type) {
563 |         case CHIP_VEGA20:
564 |         case CHIP_ARCTURUS:
565 |                 ret = df_v3_6_pmc_get_ctrl_settings(adev,
566 |                                         config,
567 |                                         counter_idx,
568 |                                         &lo_base_addr,
569 |                                         &hi_base_addr,
570 |                                         &lo_val,
571 |                                         &hi_val,
572 |                                         false);
573 |
574 |                 if (ret)
575 |                         return ret;
576 |
577 |                 df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
578 |                                      hi_base_addr, hi_val);
579 |
580 |                 if (is_remove) {
581 |                         df_v3_6_reset_perfmon_cntr(adev, config, counter_idx);
582 |                         df_v3_6_pmc_release_cntr(adev, config, counter_idx);
583 |                 }
584 |
585 |                 break;
586 |         default:
587 |                 break;
588 |         }
589 |
590 |         return ret;
591 | }
592 |
593 | static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
594 |                                   uint64_t config,
595 |                                   int counter_idx,
596 |                                   uint64_t *count)
597 | {
598 |         uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0;
599 |         *count = 0;
600 |
601 |         switch (adev->asic_type) {
602 |         case CHIP_VEGA20:
603 |         case CHIP_ARCTURUS:
604 |                 df_v3_6_pmc_get_read_settings(adev, config, counter_idx,
605 |                                               &lo_base_addr, &hi_base_addr);
606 |
607 |                 if ((lo_base_addr == 0) || (hi_base_addr == 0))
608 |                         return;
609 |
610 |                 /* rearm the counter or throw away count value on failure */
611 |                 if (df_v3_6_pmc_is_deferred(adev, config, counter_idx)) {
612 |                         int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
613 |                                                         lo_base_addr, lo_val,
614 |                                                         hi_base_addr, hi_val);
615 |
616 |                         if (rearm_err)
617 |                                 return;
618 |
619 |                         df_v3_6_pmc_set_deferred(adev, config, counter_idx,
620 |                                                  false);
621 |                 }
622 |
623 |                 df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
624 |                                      hi_base_addr, &hi_val);
625 |
626 |                 *count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);
627 |
628 |                 if (*count >= DF_V3_6_PERFMON_OVERFLOW)
629 |                         *count = 0;
630 |
631 |                 DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
632 |                                  config, lo_base_addr, hi_base_addr, lo_val, hi_val);
633 |
634 |                 break;
635 |         default:
636 |                 break;
637 |         }
638 | }
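
Note on line 626: the "| 0ULL" only promotes the 32-bit halves to 64 bits before the shift; an equivalent spelling is

        *count = ((uint64_t)hi_val << 32) | lo_val;

Any value at or above DF_V3_6_PERFMON_OVERFLOW (0xFFFFFFFFFFFF, i.e. 2^48 - 1) is treated as a wrapped/invalid 48-bit count and discarded as zero.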
639 |
640 | static bool df_v3_6_query_ras_poison_mode(struct amdgpu_device *adev)
641 | {
642 |         uint32_t hw_assert_msklo, hw_assert_mskhi;
643 |         uint32_t v0, v1, v28, v31;
644 |
645 |         hw_assert_msklo = RREG32_SOC15(DF, 0,
646 |                         mmDF_CS_UMC_AON0_HardwareAssertMaskLow);
647 |         hw_assert_mskhi = RREG32_SOC15(DF, 0,
648 |                         mmDF_NCS_PG0_HardwareAssertMaskHigh);
649 |
650 |         v0 = REG_GET_FIELD(hw_assert_msklo,
651 |                         DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0);
652 |         v1 = REG_GET_FIELD(hw_assert_msklo,
653 |                         DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk1);
654 |         v28 = REG_GET_FIELD(hw_assert_mskhi,
655 |                         DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28);
656 |         v31 = REG_GET_FIELD(hw_assert_mskhi,
657 |                         DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31);
658 |
659 |         if (v0 && v1 && v28 && v31)
660 |                 return true;
661 |         else if (!v0 && !v1 && !v28 && !v31)
662 |                 return false;
663 |         else {
664 |                 dev_warn(adev->dev, "DF poison setting is inconsistent(%d:%d:%d:%d)!\n",
665 |                          v0, v1, v28, v31);
666 |                 return false;
667 |         }
668 | }
669 |
670 | const struct amdgpu_df_funcs df_v3_6_funcs = {
671 |         .sw_init = df_v3_6_sw_init,
672 |         .sw_fini = df_v3_6_sw_fini,
673 |         .enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
674 |         .get_fb_channel_number = df_v3_6_get_fb_channel_number,
675 |         .get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
676 |         .update_medium_grain_clock_gating =
677 |                         df_v3_6_update_medium_grain_clock_gating,
678 |         .get_clockgating_state = df_v3_6_get_clockgating_state,
679 |         .pmc_start = df_v3_6_pmc_start,
680 |         .pmc_stop = df_v3_6_pmc_stop,
681 |         .pmc_get_count = df_v3_6_pmc_get_count,
682 |         .get_fica = df_v3_6_get_fica,
683 |         .set_fica = df_v3_6_set_fica,
684 |         .query_ras_poison_mode = df_v3_6_query_ras_poison_mode,
685 | };