File: dev/pci/drm/amd/amdgpu/gmc_v7_0.c
Warning: line 1215, column 3: Value stored to 'tmp' is never read
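Analyzer context: gmc_v7_0_soft_reset() reads mmSRBM_SOFT_RESET back after each write. The first read-back (line 1209) is live, since its value feeds the 'tmp &= ~srbm_soft_reset' at line 1213; the second (line 1215) stores a value that nothing consumes afterwards, which is what the checker reports. The read itself is plausibly a deliberate posting read to flush the write before the following udelay(), so the usual fix is to keep the read and drop the dead store. A minimal sketch in C of that fix (an assumption about the intent, not a committed patch):

    tmp &= ~srbm_soft_reset;
    WREG32(mmSRBM_SOFT_RESET, tmp);
    /* read back to post the write; the returned value is intentionally unused */
    RREG32(mmSRBM_SOFT_RESET);

    /* Wait a little for things to settle down */
    udelay(50);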
1 | /* |
2 | * Copyright 2014 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #include <linux/firmware.h> |
25 | #include <linux/module.h> |
26 | #include <linux/pci.h> |
27 | |
28 | #include <drm/drm_cache.h> |
29 | #include "amdgpu.h" |
30 | #include "cikd.h" |
31 | #include "cik.h" |
32 | #include "gmc_v7_0.h" |
33 | #include "amdgpu_ucode.h" |
34 | #include "amdgpu_amdkfd.h" |
35 | #include "amdgpu_gem.h" |
36 | |
37 | #include "bif/bif_4_1_d.h" |
38 | #include "bif/bif_4_1_sh_mask.h" |
39 | |
40 | #include "gmc/gmc_7_1_d.h" |
41 | #include "gmc/gmc_7_1_sh_mask.h" |
42 | |
43 | #include "oss/oss_2_0_d.h" |
44 | #include "oss/oss_2_0_sh_mask.h" |
45 | |
46 | #include "dce/dce_8_0_d.h" |
47 | #include "dce/dce_8_0_sh_mask.h" |
48 | |
49 | #include "amdgpu_atombios.h" |
50 | |
51 | #include "ivsrcid/ivsrcid_vislands30.h" |
52 | |
53 | static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev); |
54 | static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); |
55 | static int gmc_v7_0_wait_for_idle(void *handle); |
56 | |
57 | MODULE_FIRMWARE("amdgpu/bonaire_mc.bin"); |
58 | MODULE_FIRMWARE("amdgpu/hawaii_mc.bin"); |
59 | MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); |
60 | |
61 | static const u32 golden_settings_iceland_a11[] = |
62 | { |
63 | mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, |
64 | mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, |
65 | mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, |
66 | mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff |
67 | }; |
68 | |
69 | static const u32 iceland_mgcg_cgcg_init[] = |
70 | { |
71 | mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 |
72 | }; |
73 | |
74 | static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) |
75 | { |
76 | switch (adev->asic_type) { |
77 | case CHIP_TOPAZ: |
78 | amdgpu_device_program_register_sequence(adev, |
79 | iceland_mgcg_cgcg_init, |
80 | ARRAY_SIZE(iceland_mgcg_cgcg_init)); |
81 | amdgpu_device_program_register_sequence(adev, |
82 | golden_settings_iceland_a11, |
83 | ARRAY_SIZE(golden_settings_iceland_a11)); |
84 | break; |
85 | default: |
86 | break; |
87 | } |
88 | } |
89 | |
90 | static void gmc_v7_0_mc_stop(struct amdgpu_device *adev) |
91 | { |
92 | u32 blackout; |
93 | |
94 | gmc_v7_0_wait_for_idle((void *)adev); |
95 | |
96 | blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); |
97 | if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { |
98 | /* Block CPU access */ |
99 | WREG32(mmBIF_FB_EN, 0); |
100 | /* blackout the MC */ |
101 | blackout = REG_SET_FIELD(blackout, |
102 | MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); |
103 | WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); |
104 | } |
105 | /* wait for the MC to settle */ |
106 | udelay(100); |
107 | } |
108 | |
109 | static void gmc_v7_0_mc_resume(struct amdgpu_device *adev) |
110 | { |
111 | u32 tmp; |
112 | |
113 | /* unblackout the MC */ |
114 | tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL); |
115 | tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); |
116 | WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp); |
117 | /* allow CPU access */ |
118 | tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); |
119 | tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); |
120 | WREG32(mmBIF_FB_EN, tmp); |
121 | } |
122 | |
123 | /** |
124 | * gmc_v7_0_init_microcode - load ucode images from disk |
125 | * |
126 | * @adev: amdgpu_device pointer |
127 | * |
128 | * Use the firmware interface to load the ucode images into |
129 | * the driver (not loaded into hw). |
130 | * Returns 0 on success, error on failure. |
131 | */ |
132 | static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) |
133 | { |
134 | const char *chip_name; |
135 | char fw_name[30]; |
136 | int err; |
137 | |
138 | DRM_DEBUG("\n"); |
139 | |
140 | switch (adev->asic_type) { |
141 | case CHIP_BONAIRE: |
142 | chip_name = "bonaire"; |
143 | break; |
144 | case CHIP_HAWAII: |
145 | chip_name = "hawaii"; |
146 | break; |
147 | case CHIP_TOPAZ: |
148 | chip_name = "topaz"; |
149 | break; |
150 | case CHIP_KAVERI: |
151 | case CHIP_KABINI: |
152 | case CHIP_MULLINS: |
153 | return 0; |
154 | default: BUG(); |
155 | } |
156 | |
157 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); |
158 | |
159 | err = request_firmware(&adev->gmc.fw, fw_name, adev->dev); |
160 | if (err) |
161 | goto out; |
162 | err = amdgpu_ucode_validate(adev->gmc.fw); |
163 | |
164 | out: |
165 | if (err) { |
166 | pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name); |
167 | release_firmware(adev->gmc.fw); |
168 | adev->gmc.fw = NULL; |
169 | } |
170 | return err; |
171 | } |
172 | |
173 | /** |
174 | * gmc_v7_0_mc_load_microcode - load MC ucode into the hw |
175 | * |
176 | * @adev: amdgpu_device pointer |
177 | * |
178 | * Load the GDDR MC ucode into the hw (CIK). |
179 | * Returns 0 on success, error on failure. |
180 | */ |
181 | static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) |
182 | { |
183 | const struct mc_firmware_header_v1_0 *hdr; |
184 | const __le32 *fw_data = NULL; |
185 | const __le32 *io_mc_regs = NULL; |
186 | u32 running; |
187 | int i, ucode_size, regs_size; |
188 | |
189 | if (!adev->gmc.fw) |
190 | return -EINVAL; |
191 | |
192 | hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data; |
193 | amdgpu_ucode_print_mc_hdr(&hdr->header); |
194 | |
195 | adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version); |
196 | regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); |
197 | io_mc_regs = (const __le32 *) |
198 | (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); |
199 | ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; |
200 | fw_data = (const __le32 *) |
201 | (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); |
202 | |
203 | running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); |
204 | |
205 | if (running == 0) { |
206 | /* reset the engine and set to writable */ |
207 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); |
208 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); |
209 | |
210 | /* load mc io regs */ |
211 | for (i = 0; i < regs_size; i++) { |
212 | WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++)); |
213 | WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++)); |
214 | } |
215 | /* load the MC ucode */ |
216 | for (i = 0; i < ucode_size; i++) |
217 | WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++)); |
218 | |
219 | /* put the engine back into the active state */ |
220 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); |
221 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004); |
222 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001); |
223 | |
224 | /* wait for training to complete */ |
225 | for (i = 0; i < adev->usec_timeout; i++) { |
226 | if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), |
227 | MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0)) |
228 | break; |
229 | udelay(1); |
230 | } |
231 | for (i = 0; i < adev->usec_timeout; i++) { |
232 | if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), |
233 | MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1)) |
234 | break; |
235 | udelay(1); |
236 | } |
237 | } |
238 | |
239 | return 0; |
240 | } |
241 | |
242 | static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, |
243 | struct amdgpu_gmc *mc) |
244 | { |
245 | u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; |
246 | base <<= 24; |
247 | |
248 | amdgpu_gmc_vram_location(adev, mc, base); |
249 | amdgpu_gmc_gart_location(adev, mc); |
250 | } |
251 | |
252 | /** |
253 | * gmc_v7_0_mc_program - program the GPU memory controller |
254 | * |
255 | * @adev: amdgpu_device pointer |
256 | * |
257 | * Set the location of vram, gart, and AGP in the GPU's |
258 | * physical address space (CIK). |
259 | */ |
260 | static void gmc_v7_0_mc_program(struct amdgpu_device *adev) |
261 | { |
262 | u32 tmp; |
263 | int i, j; |
264 | |
265 | /* Initialize HDP */ |
266 | for (i = 0, j = 0; i < 32; i++, j += 0x6) { |
267 | WREG32((0xb05 + j), 0x00000000); |
268 | WREG32((0xb06 + j), 0x00000000); |
269 | WREG32((0xb07 + j), 0x00000000); |
270 | WREG32((0xb08 + j), 0x00000000); |
271 | WREG32((0xb09 + j), 0x00000000); |
272 | } |
273 | WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); |
274 | |
275 | if (gmc_v7_0_wait_for_idle((void *)adev)) { |
276 | dev_warn(adev->dev, "Wait for MC idle timedout !\n"); |
277 | } |
278 | if (adev->mode_info.num_crtc) { |
279 | /* Lockout access through VGA aperture*/ |
280 | tmp = RREG32(mmVGA_HDP_CONTROL); |
281 | tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); |
282 | WREG32(mmVGA_HDP_CONTROL, tmp); |
283 | |
284 | /* disable VGA render */ |
285 | tmp = RREG32(mmVGA_RENDER_CONTROL); |
286 | tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); |
287 | WREG32(mmVGA_RENDER_CONTROL, tmp); |
288 | } |
289 | /* Update configuration */ |
290 | WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, |
291 | adev->gmc.vram_start >> 12); |
292 | WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, |
293 | adev->gmc.vram_end >> 12); |
294 | WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, |
295 | adev->vram_scratch.gpu_addr >> 12); |
296 | WREG32(mmMC_VM_AGP_BASE, 0); |
297 | WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); |
298 | WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); |
299 | if (gmc_v7_0_wait_for_idle((void *)adev)) { |
300 | dev_warn(adev->dev, "Wait for MC idle timedout !\n"); |
301 | } |
302 | |
303 | WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); |
304 | |
305 | tmp = RREG32(mmHDP_MISC_CNTL); |
306 | tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0); |
307 | WREG32(mmHDP_MISC_CNTL, tmp); |
308 | |
309 | tmp = RREG32(mmHDP_HOST_PATH_CNTL); |
310 | WREG32(mmHDP_HOST_PATH_CNTL, tmp); |
311 | } |
312 | |
313 | /** |
314 | * gmc_v7_0_mc_init - initialize the memory controller driver params |
315 | * |
316 | * @adev: amdgpu_device pointer |
317 | * |
318 | * Look up the amount of vram, vram width, and decide how to place |
319 | * vram and gart within the GPU's physical address space (CIK). |
320 | * Returns 0 for success. |
321 | */ |
322 | static int gmc_v7_0_mc_init(struct amdgpu_device *adev) |
323 | { |
324 | int r; |
325 | |
326 | adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev); |
327 | if (!adev->gmc.vram_width) { |
328 | u32 tmp; |
329 | int chansize, numchan; |
330 | |
331 | /* Get VRAM information */ |
332 | tmp = RREG32(mmMC_ARB_RAMCFG); |
333 | if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { |
334 | chansize = 64; |
335 | } else { |
336 | chansize = 32; |
337 | } |
338 | tmp = RREG32(mmMC_SHARED_CHMAP); |
339 | switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { |
340 | case 0: |
341 | default: |
342 | numchan = 1; |
343 | break; |
344 | case 1: |
345 | numchan = 2; |
346 | break; |
347 | case 2: |
348 | numchan = 4; |
349 | break; |
350 | case 3: |
351 | numchan = 8; |
352 | break; |
353 | case 4: |
354 | numchan = 3; |
355 | break; |
356 | case 5: |
357 | numchan = 6; |
358 | break; |
359 | case 6: |
360 | numchan = 10; |
361 | break; |
362 | case 7: |
363 | numchan = 12; |
364 | break; |
365 | case 8: |
366 | numchan = 16; |
367 | break; |
368 | } |
369 | adev->gmc.vram_width = numchan * chansize; |
370 | } |
371 | /* size in MB on si */ |
372 | adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; |
373 | adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; |
374 | |
375 | if (!(adev->flags & AMD_IS_APU)) { |
376 | r = amdgpu_device_resize_fb_bar(adev); |
377 | if (r) |
378 | return r; |
379 | } |
380 | adev->gmc.aper_base = adev->fb_aper_offset; |
381 | adev->gmc.aper_size = adev->fb_aper_size; |
382 | |
383 | #ifdef CONFIG_X86_64 |
384 | if (adev->flags & AMD_IS_APU && |
385 | adev->gmc.real_vram_size > adev->gmc.aper_size) { |
386 | adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; |
387 | adev->gmc.aper_size = adev->gmc.real_vram_size; |
388 | } |
389 | #endif |
390 | |
391 | /* In case the PCI BAR is larger than the actual amount of vram */ |
392 | adev->gmc.visible_vram_size = adev->gmc.aper_size; |
393 | if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) |
394 | adev->gmc.visible_vram_size = adev->gmc.real_vram_size; |
395 | |
396 | /* set the gart size */ |
397 | if (amdgpu_gart_size == -1) { |
398 | switch (adev->asic_type) { |
399 | case CHIP_TOPAZ: /* no MM engines */ |
400 | default: |
401 | adev->gmc.gart_size = 256ULL << 20; |
402 | break; |
403 | #ifdef CONFIG_DRM_AMDGPU_CIK |
404 | case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */ |
405 | case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */ |
406 | case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */ |
407 | case CHIP_KABINI: /* UVD, VCE do not support GPUVM */ |
408 | case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */ |
409 | adev->gmc.gart_size = 1024ULL << 20; |
410 | break; |
411 | #endif |
412 | } |
413 | } else { |
414 | adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; |
415 | } |
416 | |
417 | gmc_v7_0_vram_gtt_location(adev, &adev->gmc); |
418 | |
419 | return 0; |
420 | } |
421 | |
422 | /** |
423 | * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid |
424 | * |
425 | * @adev: amdgpu_device pointer |
426 | * @pasid: pasid to be flushed |
427 | * |
428 | * Flush the TLB for the requested pasid. |
429 | */ |
430 | static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, |
431 | uint16_t pasid, uint32_t flush_type, |
432 | bool all_hub) |
433 | { |
434 | int vmid; |
435 | unsigned int tmp; |
436 | |
437 | if (amdgpu_in_reset(adev)) |
438 | return -EIO; |
439 | |
440 | for (vmid = 1; vmid < 16; vmid++) { |
441 | |
442 | tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); |
443 | if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && |
444 | (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { |
445 | WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); |
446 | RREG32(mmVM_INVALIDATE_RESPONSE); |
447 | break; |
448 | } |
449 | } |
450 | |
451 | return 0; |
452 | } |
453 | |
454 | /* |
455 | * GART |
456 | * VMID 0 is the physical GPU addresses as used by the kernel. |
457 | * VMIDs 1-15 are used for userspace clients and are handled |
458 | * by the amdgpu vm/hsa code. |
459 | */ |
460 | |
461 | /** |
462 | * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback |
463 | * |
464 | * @adev: amdgpu_device pointer |
465 | * @vmid: vm instance to flush |
466 | * |
467 | * Flush the TLB for the requested page table (CIK). |
468 | */ |
469 | static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, |
470 | uint32_t vmhub, uint32_t flush_type) |
471 | { |
472 | /* bits 0-15 are the VM contexts0-15 */ |
473 | WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); |
474 | } |
475 | |
476 | static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, |
477 | unsigned vmid, uint64_t pd_addr) |
478 | { |
479 | uint32_t reg; |
480 | |
481 | if (vmid < 8) |
482 | reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid; |
483 | else |
484 | reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8; |
485 | amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12); |
486 | |
487 | /* bits 0-15 are the VM contexts0-15 */ |
488 | amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid); |
489 | |
490 | return pd_addr; |
491 | } |
492 | |
493 | static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, |
494 | unsigned pasid) |
495 | { |
496 | amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid); |
497 | } |
498 | |
499 | static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level, |
500 | uint64_t *addr, uint64_t *flags) |
501 | { |
502 | BUG_ON(*addr & 0xFFFFFF0000000FFFULL); |
503 | } |
504 | |
505 | static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev, |
506 | struct amdgpu_bo_va_mapping *mapping, |
507 | uint64_t *flags) |
508 | { |
509 | *flags &= ~AMDGPU_PTE_EXECUTABLE; |
510 | *flags &= ~AMDGPU_PTE_PRT; |
511 | } |
512 | |
513 | /** |
514 | * gmc_v7_0_set_fault_enable_default - update VM fault handling |
515 | * |
516 | * @adev: amdgpu_device pointer |
517 | * @value: true redirects VM faults to the default page |
518 | */ |
519 | static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev, |
520 | bool value) |
521 | { |
522 | u32 tmp; |
523 | |
524 | tmp = RREG32(mmVM_CONTEXT1_CNTL); |
525 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, |
526 | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); |
527 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, |
528 | DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); |
529 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, |
530 | PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); |
531 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, |
532 | VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); |
533 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, |
534 | READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); |
535 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, |
536 | WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); |
537 | WREG32(mmVM_CONTEXT1_CNTL, tmp); |
538 | } |
539 | |
540 | /** |
541 | * gmc_v7_0_set_prt - set PRT VM fault |
542 | * |
543 | * @adev: amdgpu_device pointer |
544 | * @enable: enable/disable VM fault handling for PRT |
545 | */ |
546 | static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable) |
547 | { |
548 | uint32_t tmp; |
549 | |
550 | if (enable && !adev->gmc.prt_warning) { |
551 | dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n"); |
552 | adev->gmc.prt_warning = true; |
553 | } |
554 | |
555 | tmp = RREG32(mmVM_PRT_CNTL); |
556 | tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, |
557 | CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable); |
558 | tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, |
559 | CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable); |
560 | tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, |
561 | TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable); |
562 | tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, |
563 | TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable); |
564 | tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, |
565 | L2_CACHE_STORE_INVALID_ENTRIES, enable); |
566 | tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, |
567 | L1_TLB_STORE_INVALID_ENTRIES, enable); |
568 | tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, |
569 | MASK_PDE0_FAULT, enable); |
570 | WREG32(mmVM_PRT_CNTL, tmp); |
571 | |
572 | if (enable) { |
573 | uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; |
574 | uint32_t high = adev->vm_manager.max_pfn - |
575 | (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT); |
576 | |
577 | WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); |
578 | WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); |
579 | WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low); |
580 | WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low); |
581 | WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high); |
582 | WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high); |
583 | WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high); |
584 | WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high); |
585 | } else { |
586 | WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff); |
587 | WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff); |
588 | WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff); |
589 | WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff); |
590 | WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0); |
591 | WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0); |
592 | WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0); |
593 | WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0); |
594 | } |
595 | } |
596 | |
597 | /** |
598 | * gmc_v7_0_gart_enable - gart enable |
599 | * |
600 | * @adev: amdgpu_device pointer |
601 | * |
602 | * This sets up the TLBs, programs the page tables for VMID0, |
603 | * sets up the hw for VMIDs 1-15 which are allocated on |
604 | * demand, and sets up the global locations for the LDS, GDS, |
605 | * and GPUVM for FSA64 clients (CIK). |
606 | * Returns 0 for success, errors for failure. |
607 | */ |
608 | static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) |
609 | { |
610 | uint64_t table_addr; |
611 | int r, i; |
612 | u32 tmp, field; |
613 | |
614 | if (adev->gart.bo == NULL) { |
615 | dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); |
616 | return -EINVAL; |
617 | } |
618 | r = amdgpu_gart_table_vram_pin(adev); |
619 | if (r) |
620 | return r; |
621 | |
622 | table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); |
623 | |
624 | /* Setup TLB control */ |
625 | tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); |
626 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); |
627 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1); |
628 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); |
629 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1); |
630 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); |
631 | WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); |
632 | /* Setup L2 cache */ |
633 | tmp = RREG32(mmVM_L2_CNTL); |
634 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); |
635 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); |
636 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1); |
637 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); |
638 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); |
639 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); |
640 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); |
641 | WREG32(mmVM_L2_CNTL, tmp); |
642 | tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); |
643 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); |
644 | WREG32(mmVM_L2_CNTL2, tmp); |
645 | |
646 | field = adev->vm_manager.fragment_size; |
647 | tmp = RREG32(mmVM_L2_CNTL3); |
648 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); |
649 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); |
650 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field); |
651 | WREG32(mmVM_L2_CNTL3, tmp); |
652 | /* setup context0 */ |
653 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12); |
654 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12); |
655 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12); |
656 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, |
657 | (u32)(adev->dummy_page_addr >> 12)); |
658 | WREG32(mmVM_CONTEXT0_CNTL2, 0); |
659 | tmp = RREG32(mmVM_CONTEXT0_CNTL); |
660 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); |
661 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); |
662 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); |
663 | WREG32(mmVM_CONTEXT0_CNTL, tmp); |
664 | |
665 | WREG32(0x575, 0); |
666 | WREG32(0x576, 0); |
667 | WREG32(0x577, 0); |
668 | |
669 | /* empty context1-15 */ |
670 | /* FIXME start with 4G, once using 2 level pt switch to full |
671 | * vm size space |
672 | */ |
673 | /* set vm size, must be a multiple of 4 */ |
674 | WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); |
675 | WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1); |
676 | for (i = 1; i < 16; i++) { |
677 | if (i < 8) |
678 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, |
679 | table_addr >> 12); |
680 | else |
681 | WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, |
682 | table_addr >> 12); |
683 | } |
684 | |
685 | /* enable context1-15 */ |
686 | WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, |
687 | (u32)(adev->dummy_page_addr >> 12)); |
688 | WREG32(mmVM_CONTEXT1_CNTL2, 4); |
689 | tmp = RREG32(mmVM_CONTEXT1_CNTL); |
690 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); |
691 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); |
692 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, |
693 | adev->vm_manager.block_size - 9); |
694 | WREG32(mmVM_CONTEXT1_CNTL, tmp); |
695 | if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) |
696 | gmc_v7_0_set_fault_enable_default(adev, false); |
697 | else |
698 | gmc_v7_0_set_fault_enable_default(adev, true); |
699 | |
700 | if (adev->asic_type == CHIP_KAVERI) { |
701 | tmp = RREG32(mmCHUB_CONTROL); |
702 | tmp &= ~BYPASS_VM; |
703 | WREG32(mmCHUB_CONTROL, tmp); |
704 | } |
705 | |
706 | gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0); |
707 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
708 | (unsigned)(adev->gmc.gart_size >> 20), |
709 | (unsigned long long)table_addr); |
710 | adev->gart.ready = true; |
711 | return 0; |
712 | } |
713 | |
714 | static int gmc_v7_0_gart_init(struct amdgpu_device *adev) |
715 | { |
716 | int r; |
717 | |
718 | if (adev->gart.bo) { |
719 | WARN(1, "R600 PCIE GART already initialized\n"); |
720 | return 0; |
721 | } |
722 | /* Initialize common gart structure */ |
723 | r = amdgpu_gart_init(adev); |
724 | if (r) |
725 | return r; |
726 | adev->gart.table_size = adev->gart.num_gpu_pages * 8; |
727 | adev->gart.gart_pte_flags = 0; |
728 | return amdgpu_gart_table_vram_alloc(adev); |
729 | } |
730 | |
731 | /** |
732 | * gmc_v7_0_gart_disable - gart disable |
733 | * |
734 | * @adev: amdgpu_device pointer |
735 | * |
736 | * This disables all VM page tables (CIK). |
737 | */ |
738 | static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) |
739 | { |
740 | u32 tmp; |
741 | |
742 | /* Disable all tables */ |
743 | WREG32(mmVM_CONTEXT0_CNTL, 0); |
744 | WREG32(mmVM_CONTEXT1_CNTL, 0); |
745 | /* Setup TLB control */ |
746 | tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); |
747 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); |
748 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0); |
749 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0); |
750 | WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); |
751 | /* Setup L2 cache */ |
752 | tmp = RREG32(mmVM_L2_CNTL); |
753 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); |
754 | WREG32(mmVM_L2_CNTL, tmp); |
755 | WREG32(mmVM_L2_CNTL2, 0); |
756 | amdgpu_gart_table_vram_unpin(adev); |
757 | } |
758 | |
759 | /** |
760 | * gmc_v7_0_vm_decode_fault - print human readable fault info |
761 | * |
762 | * @adev: amdgpu_device pointer |
763 | * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value |
764 | * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value |
765 | * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value |
766 | * |
767 | * Print human readable fault information (CIK). |
768 | */ |
769 | static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, |
770 | u32 addr, u32 mc_client, unsigned pasid) |
771 | { |
772 | u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); |
773 | u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, |
774 | PROTECTIONS); |
775 | char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, |
776 | (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; |
777 | u32 mc_id; |
778 | |
779 | mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, |
780 | MEMORY_CLIENT_ID); |
781 | |
782 | dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", |
783 | protections, vmid, pasid, addr, |
784 | REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, |
785 | MEMORY_CLIENT_RW) ? |
786 | "write" : "read", block, mc_client, mc_id); |
787 | } |
788 | |
789 | |
790 | static const u32 mc_cg_registers[] = { |
791 | mmMC_HUB_MISC_HUB_CG, |
792 | mmMC_HUB_MISC_SIP_CG, |
793 | mmMC_HUB_MISC_VM_CG, |
794 | mmMC_XPB_CLK_GAT, |
795 | mmATC_MISC_CG, |
796 | mmMC_CITF_MISC_WR_CG, |
797 | mmMC_CITF_MISC_RD_CG, |
798 | mmMC_CITF_MISC_VM_CG, |
799 | mmVM_L2_CG, |
800 | }; |
801 | |
802 | static const u32 mc_cg_ls_en[] = { |
803 | MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK, |
804 | MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK, |
805 | MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK, |
806 | MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK, |
807 | ATC_MISC_CG__MEM_LS_ENABLE_MASK, |
808 | MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK, |
809 | MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK, |
810 | MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK, |
811 | VM_L2_CG__MEM_LS_ENABLE_MASK, |
812 | }; |
813 | |
814 | static const u32 mc_cg_en[] = { |
815 | MC_HUB_MISC_HUB_CG__ENABLE_MASK, |
816 | MC_HUB_MISC_SIP_CG__ENABLE_MASK, |
817 | MC_HUB_MISC_VM_CG__ENABLE_MASK, |
818 | MC_XPB_CLK_GAT__ENABLE_MASK, |
819 | ATC_MISC_CG__ENABLE_MASK, |
820 | MC_CITF_MISC_WR_CG__ENABLE_MASK, |
821 | MC_CITF_MISC_RD_CG__ENABLE_MASK, |
822 | MC_CITF_MISC_VM_CG__ENABLE_MASK, |
823 | VM_L2_CG__ENABLE_MASK, |
824 | }; |
825 | |
826 | static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev, |
827 | bool enable) |
828 | { |
829 | int i; |
830 | u32 orig, data; |
831 | |
832 | for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { |
833 | orig = data = RREG32(mc_cg_registers[i]); |
834 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) |
835 | data |= mc_cg_ls_en[i]; |
836 | else |
837 | data &= ~mc_cg_ls_en[i]; |
838 | if (data != orig) |
839 | WREG32(mc_cg_registers[i], data); |
840 | } |
841 | } |
842 | |
843 | static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev, |
844 | bool enable) |
845 | { |
846 | int i; |
847 | u32 orig, data; |
848 | |
849 | for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { |
850 | orig = data = RREG32(mc_cg_registers[i]); |
851 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) |
852 | data |= mc_cg_en[i]; |
853 | else |
854 | data &= ~mc_cg_en[i]; |
855 | if (data != orig) |
856 | WREG32(mc_cg_registers[i], data); |
857 | } |
858 | } |
859 | |
860 | static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev, |
861 | bool enable) |
862 | { |
863 | u32 orig, data; |
864 | |
865 | orig = data = RREG32_PCIE(ixPCIE_CNTL2); |
866 | |
867 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { |
868 | data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); |
869 | data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); |
870 | data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); |
871 | data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1); |
872 | } else { |
873 | data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0); |
874 | data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0); |
875 | data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0); |
876 | data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0); |
877 | } |
878 | |
879 | if (orig != data) |
880 | WREG32_PCIE(ixPCIE_CNTL2, data); |
881 | } |
882 | |
883 | static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev, |
884 | bool enable) |
885 | { |
886 | u32 orig, data; |
887 | |
888 | orig = data = RREG32(mmHDP_HOST_PATH_CNTL); |
889 | |
890 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) |
891 | data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); |
892 | else |
893 | data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); |
894 | |
895 | if (orig != data) |
896 | WREG32(mmHDP_HOST_PATH_CNTL, data); |
897 | } |
898 | |
899 | static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, |
900 | bool enable) |
901 | { |
902 | u32 orig, data; |
903 | |
904 | orig = data = RREG32(mmHDP_MEM_POWER_LS); |
905 | |
906 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) |
907 | data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); |
908 | else |
909 | data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); |
910 | |
911 | if (orig != data) |
912 | WREG32(mmHDP_MEM_POWER_LS, data); |
913 | } |
914 | |
915 | static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type) |
916 | { |
917 | switch (mc_seq_vram_type) { |
918 | case MC_SEQ_MISC0__MT__GDDR1: |
919 | return AMDGPU_VRAM_TYPE_GDDR1; |
920 | case MC_SEQ_MISC0__MT__DDR2: |
921 | return AMDGPU_VRAM_TYPE_DDR2; |
922 | case MC_SEQ_MISC0__MT__GDDR3: |
923 | return AMDGPU_VRAM_TYPE_GDDR3; |
924 | case MC_SEQ_MISC0__MT__GDDR4: |
925 | return AMDGPU_VRAM_TYPE_GDDR4; |
926 | case MC_SEQ_MISC0__MT__GDDR5: |
927 | return AMDGPU_VRAM_TYPE_GDDR5; |
928 | case MC_SEQ_MISC0__MT__HBM: |
929 | return AMDGPU_VRAM_TYPE_HBM; |
930 | case MC_SEQ_MISC0__MT__DDR3: |
931 | return AMDGPU_VRAM_TYPE_DDR3; |
932 | default: |
933 | return AMDGPU_VRAM_TYPE_UNKNOWN; |
934 | } |
935 | } |
936 | |
937 | static int gmc_v7_0_early_init(void *handle) |
938 | { |
939 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
940 | |
941 | gmc_v7_0_set_gmc_funcs(adev); |
942 | gmc_v7_0_set_irq_funcs(adev); |
943 | |
944 | adev->gmc.shared_aperture_start = 0x2000000000000000ULL; |
945 | adev->gmc.shared_aperture_end = |
946 | adev->gmc.shared_aperture_start + (4ULL << 30) - 1; |
947 | adev->gmc.private_aperture_start = |
948 | adev->gmc.shared_aperture_end + 1; |
949 | adev->gmc.private_aperture_end = |
950 | adev->gmc.private_aperture_start + (4ULL << 30) - 1; |
951 | |
952 | return 0; |
953 | } |
954 | |
955 | static int gmc_v7_0_late_init(void *handle) |
956 | { |
957 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
958 | |
959 | amdgpu_bo_late_init(adev); |
960 | |
961 | if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) |
962 | return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); |
963 | else |
964 | return 0; |
965 | } |
966 | |
967 | static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) |
968 | { |
969 | u32 d1vga_control = RREG32(mmD1VGA_CONTROL); |
970 | unsigned size; |
971 | |
972 | if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { |
973 | size = AMDGPU_VBIOS_VGA_ALLOCATION; |
974 | } else { |
975 | u32 viewport = RREG32(mmVIEWPORT_SIZE); |
976 | size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * |
977 | REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * |
978 | 4); |
979 | } |
980 | |
981 | return size; |
982 | } |
983 | |
984 | static int gmc_v7_0_sw_init(void *handle) |
985 | { |
986 | int r; |
987 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
988 | |
989 | adev->num_vmhubs = 1; |
990 | |
991 | if (adev->flags & AMD_IS_APU) { |
992 | adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; |
993 | } else { |
994 | u32 tmp = RREG32(mmMC_SEQ_MISC0); |
995 | tmp &= MC_SEQ_MISC0__MT__MASK; |
996 | adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp); |
997 | } |
998 | |
999 | r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault); |
1000 | if (r) |
1001 | return r; |
1002 | |
1003 | r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault); |
1004 | if (r) |
1005 | return r; |
1006 | |
1007 | /* Adjust VM size here. |
1008 | * Currently set to 4GB ((1 << 20) 4k pages). |
1009 | * Max GPUVM size for cayman and SI is 40 bits. |
1010 | */ |
1011 | amdgpu_vm_adjust_size(adev, 64, 9, 1, 40); |
1012 | |
1013 | /* Set the internal MC address mask |
1014 | * This is the max address of the GPU's |
1015 | * internal address space. |
1016 | */ |
1017 | adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ |
1018 | |
1019 | r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); |
1020 | if (r) { |
1021 | pr_warn("No suitable DMA available\n"); |
1022 | return r; |
1023 | } |
1024 | adev->need_swiotlb = drm_need_swiotlb(40); |
1025 | |
1026 | r = gmc_v7_0_init_microcode(adev); |
1027 | if (r) { |
1028 | DRM_ERROR("Failed to load mc firmware!\n")__drm_err("Failed to load mc firmware!\n"); |
1029 | return r; |
1030 | } |
1031 | |
1032 | r = gmc_v7_0_mc_init(adev); |
1033 | if (r) |
1034 | return r; |
1035 | |
1036 | amdgpu_gmc_get_vbios_allocations(adev); |
1037 | |
1038 | /* Memory manager */ |
1039 | r = amdgpu_bo_init(adev); |
1040 | if (r) |
1041 | return r; |
1042 | |
1043 | r = gmc_v7_0_gart_init(adev); |
1044 | if (r) |
1045 | return r; |
1046 | |
1047 | /* |
1048 | * number of VMs |
1049 | * VMID 0 is reserved for System |
1050 | * amdgpu graphics/compute will use VMIDs 1-7 |
1051 | * amdkfd will use VMIDs 8-15 |
1052 | */ |
1053 | adev->vm_manager.first_kfd_vmid = 8; |
1054 | amdgpu_vm_manager_init(adev); |
1055 | |
1056 | /* base offset of vram pages */ |
1057 | if (adev->flags & AMD_IS_APU) { |
1058 | u64 tmp = RREG32(mmMC_VM_FB_OFFSET); |
1059 | |
1060 | tmp <<= 22; |
1061 | adev->vm_manager.vram_base_offset = tmp; |
1062 | } else { |
1063 | adev->vm_manager.vram_base_offset = 0; |
1064 | } |
1065 | |
1066 | adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info), |
1067 | GFP_KERNEL); |
1068 | if (!adev->gmc.vm_fault_info) |
1069 | return -ENOMEM; |
1070 | atomic_set(&adev->gmc.vm_fault_info_updated, 0); |
1071 | |
1072 | return 0; |
1073 | } |
1074 | |
1075 | static int gmc_v7_0_sw_fini(void *handle) |
1076 | { |
1077 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1078 | |
1079 | amdgpu_gem_force_release(adev); |
1080 | amdgpu_vm_manager_fini(adev); |
1081 | kfree(adev->gmc.vm_fault_info); |
1082 | amdgpu_gart_table_vram_free(adev); |
1083 | amdgpu_bo_fini(adev); |
1084 | amdgpu_gart_fini(adev); |
1085 | release_firmware(adev->gmc.fw); |
1086 | adev->gmc.fw = NULL; |
1087 | |
1088 | return 0; |
1089 | } |
1090 | |
1091 | static int gmc_v7_0_hw_init(void *handle) |
1092 | { |
1093 | int r; |
1094 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1095 | |
1096 | gmc_v7_0_init_golden_registers(adev); |
1097 | |
1098 | gmc_v7_0_mc_program(adev); |
1099 | |
1100 | if (!(adev->flags & AMD_IS_APU)) { |
1101 | r = gmc_v7_0_mc_load_microcode(adev); |
1102 | if (r) { |
1103 | DRM_ERROR("Failed to load MC firmware!\n")__drm_err("Failed to load MC firmware!\n"); |
1104 | return r; |
1105 | } |
1106 | } |
1107 | |
1108 | r = gmc_v7_0_gart_enable(adev); |
1109 | if (r) |
1110 | return r; |
1111 | |
1112 | return r; |
1113 | } |
1114 | |
1115 | static int gmc_v7_0_hw_fini(void *handle) |
1116 | { |
1117 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1118 | |
1119 | amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); |
1120 | gmc_v7_0_gart_disable(adev); |
1121 | |
1122 | return 0; |
1123 | } |
1124 | |
1125 | static int gmc_v7_0_suspend(void *handle) |
1126 | { |
1127 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1128 | |
1129 | gmc_v7_0_hw_fini(adev); |
1130 | |
1131 | return 0; |
1132 | } |
1133 | |
1134 | static int gmc_v7_0_resume(void *handle) |
1135 | { |
1136 | int r; |
1137 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1138 | |
1139 | r = gmc_v7_0_hw_init(adev); |
1140 | if (r) |
1141 | return r; |
1142 | |
1143 | amdgpu_vmid_reset_all(adev); |
1144 | |
1145 | return 0; |
1146 | } |
1147 | |
1148 | static bool gmc_v7_0_is_idle(void *handle) |
1149 | { |
1150 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1151 | u32 tmp = RREG32(mmSRBM_STATUS); |
1152 | |
1153 | if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | |
1154 | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK)) |
1155 | return false; |
1156 | |
1157 | return true; |
1158 | } |
1159 | |
1160 | static int gmc_v7_0_wait_for_idle(void *handle) |
1161 | { |
1162 | unsigned i; |
1163 | u32 tmp; |
1164 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1165 | |
1166 | for (i = 0; i < adev->usec_timeout; i++) { |
1167 | /* read MC_STATUS */ |
1168 | tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK | |
1169 | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | |
1170 | SRBM_STATUS__MCC_BUSY_MASK | |
1171 | SRBM_STATUS__MCD_BUSY_MASK | |
1172 | SRBM_STATUS__VMC_BUSY_MASK); |
1173 | if (!tmp) |
1174 | return 0; |
1175 | udelay(1); |
1176 | } |
1177 | return -ETIMEDOUT; |
1178 | |
1179 | } |
1180 | |
1181 | static int gmc_v7_0_soft_reset(void *handle) |
1182 | { |
1183 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1184 | u32 srbm_soft_reset = 0; |
1185 | u32 tmp = RREG32(mmSRBM_STATUS)amdgpu_device_rreg(adev, (0x394), 0); |
1186 | |
1187 | if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
1188 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1189 | SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
1190 | 
1191 | if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
1192 | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
1193 | if (!(adev->flags & AMD_IS_APU))
1194 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1195 | SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
1196 | } |
1197 | |
1198 | if (srbm_soft_reset) { |
1199 | gmc_v7_0_mc_stop(adev); |
1200 | if (gmc_v7_0_wait_for_idle((void *)adev)) { |
1201 | dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
1202 | } |
1203 | |
1204 | |
1205 | tmp = RREG32(mmSRBM_SOFT_RESET);
1206 | tmp |= srbm_soft_reset;
1207 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1208 | WREG32(mmSRBM_SOFT_RESET, tmp);
1209 | tmp = RREG32(mmSRBM_SOFT_RESET);
1210 | 
1211 | udelay(50);
1212 | 
1213 | tmp &= ~srbm_soft_reset;
1214 | WREG32(mmSRBM_SOFT_RESET, tmp);
1215 | RREG32(mmSRBM_SOFT_RESET); /* posting read only; the value is intentionally unused */
1216 | |
1217 | /* Wait a little for things to settle down */ |
1218 | udelay(50); |
1219 | |
1220 | gmc_v7_0_mc_resume(adev); |
1221 | udelay(50); |
1222 | } |
1223 | |
1224 | return 0; |
1225 | } |
1226 | |
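/**
 * gmc_v7_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused here)
 * @state: AMDGPU_IRQ_STATE_DISABLE or AMDGPU_IRQ_STATE_ENABLE
 *
 * Toggles the protection fault interrupt enable bits in both
 * VM_CONTEXT0_CNTL (system context) and VM_CONTEXT1_CNTL (user VMs).
 */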
1227 | static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev, |
1228 | struct amdgpu_irq_src *src, |
1229 | unsigned type, |
1230 | enum amdgpu_interrupt_state state) |
1231 | { |
1232 | u32 tmp; |
1233 | u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1234 | VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1235 | VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1236 | VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1237 | VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
1238 | VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
1239 | |
1240 | switch (state) { |
1241 | case AMDGPU_IRQ_STATE_DISABLE: |
1242 | /* system context */ |
1243 | tmp = RREG32(mmVM_CONTEXT0_CNTL);
1244 | tmp &= ~bits;
1245 | WREG32(mmVM_CONTEXT0_CNTL, tmp);
1246 | /* VMs */
1247 | tmp = RREG32(mmVM_CONTEXT1_CNTL);
1248 | tmp &= ~bits;
1249 | WREG32(mmVM_CONTEXT1_CNTL, tmp);
1250 | break; |
1251 | case AMDGPU_IRQ_STATE_ENABLE: |
1252 | /* system context */ |
1253 | tmp = RREG32(mmVM_CONTEXT0_CNTL);
1254 | tmp |= bits;
1255 | WREG32(mmVM_CONTEXT0_CNTL, tmp);
1256 | /* VMs */
1257 | tmp = RREG32(mmVM_CONTEXT1_CNTL);
1258 | tmp |= bits;
1259 | WREG32(mmVM_CONTEXT1_CNTL, tmp);
1260 | break; |
1261 | default: |
1262 | break; |
1263 | } |
1264 | |
1265 | return 0; |
1266 | } |
1267 | |
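/**
 * gmc_v7_0_process_interrupt - process a VM protection fault
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: decoded interrupt ring entry
 *
 * Latches and clears the fault address/status registers, optionally
 * disables further faults, prints a rate-limited diagnostic, and hands
 * the decoded fault off to KFD if the faulting VMID belongs to it.
 */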
1268 | static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, |
1269 | struct amdgpu_irq_src *source, |
1270 | struct amdgpu_iv_entry *entry) |
1271 | { |
1272 | u32 addr, status, mc_client, vmid; |
1273 | |
1274 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1275 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1276 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1277 | /* reset addr and status */
1278 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1279 | |
1280 | if (!addr && !status) |
1281 | return 0; |
1282 | |
1283 | if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
1284 | gmc_v7_0_set_fault_enable_default(adev, false);
1285 | |
1286 | if (printk_ratelimit()) {
1287 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1288 | entry->src_id, entry->src_data[0]);
1289 | dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1290 | addr);
1291 | dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1292 | status);
1293 | gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client, |
1294 | entry->pasid); |
1295 | } |
1296 | |
1297 | vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1298 | VMID);
1299 | if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
1300 | && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
1301 | struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info; |
1302 | u32 protections = REG_GET_FIELD(status,
1303 | VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1304 | PROTECTIONS);
1305 | |
1306 | info->vmid = vmid; |
1307 | info->mc_id = REG_GET_FIELD(status,
1308 | VM_CONTEXT1_PROTECTION_FAULT_STATUS,
1309 | MEMORY_CLIENT_ID);
1310 | info->status = status; |
1311 | info->page_addr = addr; |
1312 | info->prot_valid = protections & 0x7 ? true : false;
1313 | info->prot_read = protections & 0x8 ? true : false;
1314 | info->prot_write = protections & 0x10 ? true : false;
1315 | info->prot_exec = protections & 0x20 ? true : false;
1316 | mb();
1317 | atomic_set(&adev->gmc.vm_fault_info_updated, 1);
1318 | } |
1319 | |
1320 | return 0; |
1321 | } |
1322 | |
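/**
 * gmc_v7_0_set_clockgating_state - set MC clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: AMD_CG_STATE_GATE or AMD_CG_STATE_UNGATE
 *
 * Gates or ungates medium grain clockgating and light sleep for the MC
 * (dGPU only), BIF, and HDP blocks.
 */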
1323 | static int gmc_v7_0_set_clockgating_state(void *handle, |
1324 | enum amd_clockgating_state state) |
1325 | { |
1326 | bool gate = false;
1327 | struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328 | 
1329 | if (state == AMD_CG_STATE_GATE)
1330 | gate = true;
1331 | |
1332 | if (!(adev->flags & AMD_IS_APU)) { |
1333 | gmc_v7_0_enable_mc_mgcg(adev, gate); |
1334 | gmc_v7_0_enable_mc_ls(adev, gate); |
1335 | } |
1336 | gmc_v7_0_enable_bif_mgls(adev, gate); |
1337 | gmc_v7_0_enable_hdp_mgcg(adev, gate); |
1338 | gmc_v7_0_enable_hdp_ls(adev, gate); |
1339 | |
1340 | return 0; |
1341 | } |
1342 | |
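/**
 * gmc_v7_0_set_powergating_state - set MC powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: requested powergating state
 *
 * Powergating is not implemented for this block, so this is a stub.
 */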
1343 | static int gmc_v7_0_set_powergating_state(void *handle, |
1344 | enum amd_powergating_state state) |
1345 | { |
1346 | return 0; |
1347 | } |
1348 | |
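/* Dispatch tables wiring the GMC v7 callbacks into the common IP framework. */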
1349 | static const struct amd_ip_funcs gmc_v7_0_ip_funcs = { |
1350 | .name = "gmc_v7_0", |
1351 | .early_init = gmc_v7_0_early_init, |
1352 | .late_init = gmc_v7_0_late_init, |
1353 | .sw_init = gmc_v7_0_sw_init, |
1354 | .sw_fini = gmc_v7_0_sw_fini, |
1355 | .hw_init = gmc_v7_0_hw_init, |
1356 | .hw_fini = gmc_v7_0_hw_fini, |
1357 | .suspend = gmc_v7_0_suspend, |
1358 | .resume = gmc_v7_0_resume, |
1359 | .is_idle = gmc_v7_0_is_idle, |
1360 | .wait_for_idle = gmc_v7_0_wait_for_idle, |
1361 | .soft_reset = gmc_v7_0_soft_reset, |
1362 | .set_clockgating_state = gmc_v7_0_set_clockgating_state, |
1363 | .set_powergating_state = gmc_v7_0_set_powergating_state, |
1364 | }; |
1365 | |
1366 | static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { |
1367 | .flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb, |
1368 | .flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid, |
1369 | .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb, |
1370 | .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping, |
1371 | .set_prt = gmc_v7_0_set_prt, |
1372 | .get_vm_pde = gmc_v7_0_get_vm_pde, |
1373 | .get_vm_pte = gmc_v7_0_get_vm_pte, |
1374 | .get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size, |
1375 | }; |
1376 | |
1377 | static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = { |
1378 | .set = gmc_v7_0_vm_fault_interrupt_state, |
1379 | .process = gmc_v7_0_process_interrupt, |
1380 | }; |
1381 | |
1382 | static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev) |
1383 | { |
1384 | adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs; |
1385 | } |
1386 | |
1387 | static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) |
1388 | { |
1389 | adev->gmc.vm_fault.num_types = 1; |
1390 | adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs; |
1391 | } |
1392 | |
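/*
 * Two versioned instances of the same IP block: both expose identical
 * callbacks and differ only in the minor revision they report.
 */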
1393 | const struct amdgpu_ip_block_version gmc_v7_0_ip_block = |
1394 | { |
1395 | .type = AMD_IP_BLOCK_TYPE_GMC, |
1396 | .major = 7, |
1397 | .minor = 0, |
1398 | .rev = 0, |
1399 | .funcs = &gmc_v7_0_ip_funcs, |
1400 | }; |
1401 | |
1402 | const struct amdgpu_ip_block_version gmc_v7_4_ip_block = |
1403 | { |
1404 | .type = AMD_IP_BLOCK_TYPE_GMC, |
1405 | .major = 7, |
1406 | .minor = 4, |
1407 | .rev = 0, |
1408 | .funcs = &gmc_v7_0_ip_funcs, |
1409 | }; |