File: | dev/pci/drm/amd/amdgpu/gmc_v10_0.c |
Warning: | line 797, column 3: Value stored to 'r' is never read |
1 | /* |
2 | * Copyright 2019 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | #include <linux/firmware.h> |
24 | #include <linux/pci.h> |
25 | #include "amdgpu.h" |
26 | #include "amdgpu_atomfirmware.h" |
27 | #include "gmc_v10_0.h" |
28 | #include "umc_v8_7.h" |
29 | |
30 | #include "hdp/hdp_5_0_0_offset.h" |
31 | #include "hdp/hdp_5_0_0_sh_mask.h" |
32 | #include "athub/athub_2_0_0_sh_mask.h" |
33 | #include "athub/athub_2_0_0_offset.h" |
34 | #include "dcn/dcn_2_0_0_offset.h" |
35 | #include "dcn/dcn_2_0_0_sh_mask.h" |
36 | #include "oss/osssys_5_0_0_offset.h" |
37 | #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" |
38 | #include "navi10_enum.h" |
39 | |
40 | #include "soc15.h" |
41 | #include "soc15d.h" |
42 | #include "soc15_common.h" |
43 | |
44 | #include "nbio_v2_3.h" |
45 | |
46 | #include "gfxhub_v2_0.h" |
47 | #include "gfxhub_v2_1.h" |
48 | #include "mmhub_v2_0.h" |
49 | #include "athub_v2_0.h" |
50 | #include "athub_v2_1.h" |
51 | |
52 | #if 0 |
53 | static const struct soc15_reg_golden golden_settings_navi10_hdp[] = |
54 | { |
55 | /* TODO add golden setting for hdp */ |
56 | }; |
57 | #endif |
58 | |
59 | static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev, |
60 | struct amdgpu_irq_src *src, |
61 | unsigned type, |
62 | enum amdgpu_interrupt_state state) |
63 | { |
64 | return 0; |
65 | } |
66 | |
67 | static int |
68 | gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, |
69 | struct amdgpu_irq_src *src, unsigned type, |
70 | enum amdgpu_interrupt_state state) |
71 | { |
72 | switch (state) { |
73 | case AMDGPU_IRQ_STATE_DISABLE: |
74 | /* MM HUB */ |
75 | amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); |
76 | /* GFX HUB */ |
77 | amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); |
78 | break; |
79 | case AMDGPU_IRQ_STATE_ENABLE: |
80 | /* MM HUB */ |
81 | amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); |
82 | /* GFX HUB */ |
83 | amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); |
84 | break; |
85 | default: |
86 | break; |
87 | } |
88 | |
89 | return 0; |
90 | } |
91 | |
92 | static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, |
93 | struct amdgpu_irq_src *source, |
94 | struct amdgpu_iv_entry *entry) |
95 | { |
96 | struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; |
97 | uint32_t status = 0; |
98 | u64 addr; |
99 | |
100 | addr = (u64)entry->src_data[0] << 12; |
101 | addr |= ((u64)entry->src_data[1] & 0xf) << 44; |
102 | |
103 | if (!amdgpu_sriov_vf(adev)) { |
104 | /* |
105 | * Issue a dummy read to wait for the status register to |
106 | * be updated to avoid reading an incorrect value due to |
107 | * the new fast GRBM interface. |
108 | */ |
109 | if (entry->vmid_src == AMDGPU_GFXHUB_0) |
110 | RREG32(hub->vm_l2_pro_fault_status); |
111 | |
112 | status = RREG32(hub->vm_l2_pro_fault_status); |
113 | WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); |
114 | } |
115 | |
116 | if (printk_ratelimit()) { |
117 | struct amdgpu_task_info task_info; |
118 | |
119 | memset(&task_info, 0, sizeof(struct amdgpu_task_info)); |
120 | amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); |
121 | |
122 | dev_err(adev->dev, |
123 | "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, " |
124 | "for process %s pid %d thread %s pid %d)\n", |
125 | entry->vmid_src ? "mmhub" : "gfxhub", |
126 | entry->src_id, entry->ring_id, entry->vmid, |
127 | entry->pasid, task_info.process_name, task_info.tgid, |
128 | task_info.task_name, task_info.pid); |
129 | dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", |
130 | addr, entry->client_id); |
131 | if (!amdgpu_sriov_vf(adev)) |
132 | hub->vmhub_funcs->print_l2_protection_fault_status(adev, status); |
133 | } |
134 | |
135 | return 0; |
136 | } |
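
The handler above reassembles the 48-bit faulting address from two IV-ring dwords: src_data[0] carries the page frame (address bits 43:12) and the low nibble of src_data[1] carries bits 47:44. A minimal standalone sketch of that decode, with hypothetical input values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t src_data0 = 0x12345678; /* hypothetical page-frame dword */
        uint32_t src_data1 = 0x0000000a; /* low nibble = address bits 47:44 */
        uint64_t addr;

        addr  = (uint64_t)src_data0 << 12;         /* bits 43:12 */
        addr |= ((uint64_t)src_data1 & 0xf) << 44; /* bits 47:44 */
        printf("page fault at 0x%016llx\n", (unsigned long long)addr);
        return 0;
    }
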
137 | |
138 | static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = { |
139 | .set = gmc_v10_0_vm_fault_interrupt_state, |
140 | .process = gmc_v10_0_process_interrupt, |
141 | }; |
142 | |
143 | static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = { |
144 | .set = gmc_v10_0_ecc_interrupt_state, |
145 | .process = amdgpu_umc_process_ecc_irq, |
146 | }; |
147 | |
148 | static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev) |
149 | { |
150 | adev->gmc.vm_fault.num_types = 1; |
151 | adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs; |
152 | |
153 | if (!amdgpu_sriov_vf(adev)) { |
154 | adev->gmc.ecc_irq.num_types = 1; |
155 | adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs; |
156 | } |
157 | } |
158 | |
159 | /** |
160 | * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore |
161 | * |
162 | * @adev: amdgpu_device pointer |
163 | * @vmhub: vmhub type |
164 | * |
165 | */ |
166 | static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev, |
167 | uint32_t vmhub) |
168 | { |
169 | return ((vmhub == AMDGPU_MMHUB_0 || |
170 | vmhub == AMDGPU_MMHUB_1) && |
171 | (!amdgpu_sriov_vf(adev))); |
172 | } |
173 | |
174 | static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info( |
175 | struct amdgpu_device *adev, |
176 | uint8_t vmid, uint16_t *p_pasid) |
177 | { |
178 | uint32_t value; |
179 | |
180 | value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) |
181 | + vmid); |
182 | *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; |
183 | |
184 | return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); |
185 | } |
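
For reference, each ATC_VMIDn_PASID_MAPPING register packs the 16-bit PASID into bits 15:0 and a valid flag into bit 31, which is what the two masks above select. A minimal sketch of the decode (mask values taken from the expansions shown in this listing):

    #include <stdbool.h>
    #include <stdint.h>

    static bool decode_pasid_mapping(uint32_t value, uint16_t *p_pasid)
    {
        *p_pasid = value & 0x0000ffff;  /* ATC_VMID0_PASID_MAPPING__PASID_MASK */
        return !!(value & 0x80000000u); /* ATC_VMID0_PASID_MAPPING__VALID_MASK */
    }
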
186 | |
187 | /* |
188 | * GART |
189 | * VMID 0 is the physical GPU address space used by the kernel. |
190 | * VMIDs 1-15 are used for userspace clients and are handled |
191 | * by the amdgpu vm/hsa code. |
192 | */ |
193 | |
194 | static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, |
195 | unsigned int vmhub, uint32_t flush_type) |
196 | { |
197 | bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); |
198 | struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; |
199 | u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); |
200 | u32 tmp; |
201 | /* Use register 17 for GART */ |
202 | const unsigned eng = 17; |
203 | unsigned int i; |
204 | |
205 | spin_lock(&adev->gmc.invalidate_lock); |
206 | /* |
207 | * The GPUVM invalidate acknowledge state may be lost across a |
208 | * power-gating off cycle. Add a semaphore acquire before the |
209 | * invalidation and a semaphore release after it to avoid entering |
210 | * the power-gated state, as a workaround for the issue. |
211 | */ |
212 | |
213 | /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ |
214 | if (use_semaphore) { |
215 | for (i = 0; i < adev->usec_timeout; i++) { |
216 | /* a read return value of 1 means semaphore acquire */ |
217 | tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + |
218 | hub->eng_distance * eng); |
219 | if (tmp & 0x1) |
220 | break; |
221 | udelay(1); |
222 | } |
223 | |
224 | if (i >= adev->usec_timeout) |
225 | DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n")__drm_err("Timeout waiting for sem acquire in VM flush!\n"); |
226 | } |
227 | |
228 | WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); |
229 | |
230 | /* |
231 | * Issue a dummy read to wait for the ACK register to be cleared |
232 | * to avoid a false ACK due to the new fast GRBM interface. |
233 | */ |
234 | if (vmhub == AMDGPU_GFXHUB_0) |
235 | RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng); |
236 | |
237 | /* Wait for ACK with a delay. */ |
238 | for (i = 0; i < adev->usec_timeout; i++) { |
239 | tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + |
240 | hub->eng_distance * eng); |
241 | tmp &= 1 << vmid; |
242 | if (tmp) |
243 | break; |
244 | |
245 | udelay(1); |
246 | } |
247 | |
248 | /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ |
249 | if (use_semaphore) |
250 | /* |
251 | * add semaphore release after invalidation, |
252 | * write with 0 means semaphore release |
253 | */ |
254 | WREG32_NO_KIQ(hub->vm_inv_eng0_sem + |
255 | hub->eng_distance * eng, 0); |
256 | |
257 | spin_unlock(&adev->gmc.invalidate_lock); |
258 | |
259 | if (i < adev->usec_timeout) |
260 | return; |
261 | |
262 | DRM_ERROR("Timeout waiting for VM flush ACK!\n")__drm_err("Timeout waiting for VM flush ACK!\n"); |
263 | } |
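
gmc_v10_0_flush_vm_hub() uses the same poll-with-timeout shape twice: once to acquire the invalidation semaphore (a read returning 1 means acquired) and once to wait for the per-VMID ACK bit. A generic sketch of that pattern, assuming a hypothetical read_reg() callback in place of RREG32_NO_KIQ:

    /* Poll until (reg & mask) becomes non-zero or timeout_us microseconds
     * elapse; returns 0 on success, -1 on timeout. Illustrative only. */
    static int poll_reg_bits(uint32_t (*read_reg)(uint32_t), uint32_t reg,
                             uint32_t mask, unsigned int timeout_us)
    {
        unsigned int i;

        for (i = 0; i < timeout_us; i++) {
            if (read_reg(reg) & mask)
                return 0;
            udelay(1); /* one sample per microsecond, as above */
        }
        return -1;
    }
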
264 | |
265 | /** |
266 | * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback |
267 | * |
268 | * @adev: amdgpu_device pointer |
269 | * @vmid: vm instance to flush |
270 | * |
271 | * Flush the TLB for the requested page table. |
272 | */ |
273 | static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, |
274 | uint32_t vmhub, uint32_t flush_type) |
275 | { |
276 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
277 | struct dma_fence *fence; |
278 | struct amdgpu_job *job; |
279 | |
280 | int r; |
281 | |
282 | /* flush hdp cache */ |
283 | adev->nbio.funcs->hdp_flush(adev, NULL); |
284 | |
285 | /* For SRIOV run time, driver shouldn't access the register through MMIO |
286 | * Directly use kiq to do the vm invalidation instead |
287 | */ |
288 | if (adev->gfx.kiq.ring.sched.ready && |
289 | (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && |
290 | down_read_trylock(&adev->reset_sem)) { |
291 | struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; |
292 | const unsigned eng = 17; |
293 | u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type); |
294 | u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng; |
295 | u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; |
296 | |
297 | amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, |
298 | 1 << vmid); |
299 | |
300 | up_read(&adev->reset_sem); |
301 | return; |
302 | } |
303 | |
304 | mutex_lock(&adev->mman.gtt_window_lock); |
305 | |
306 | if (vmhub == AMDGPU_MMHUB_0) { |
307 | gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0); |
308 | mutex_unlock(&adev->mman.gtt_window_lock); |
309 | return; |
310 | } |
311 | |
312 | BUG_ON(vmhub != AMDGPU_GFXHUB_0); |
313 | |
314 | if (!adev->mman.buffer_funcs_enabled || |
315 | !adev->ib_pool_ready || |
316 | amdgpu_in_reset(adev) || |
317 | ring->sched.ready == false) { |
318 | gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0); |
319 | mutex_unlock(&adev->mman.gtt_window_lock); |
320 | return; |
321 | } |
322 | |
323 | /* The SDMA on Navi has a bug which can theoretically result in memory |
324 | * corruption if an invalidation happens at the same time as a VA |
325 | * translation. Avoid this by doing the invalidation from the SDMA |
326 | * itself. |
327 | */ |
328 | r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, |
329 | &job); |
330 | if (r) |
331 | goto error_alloc; |
332 | |
333 | job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo); |
334 | job->vm_needs_flush = true; |
335 | job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop; |
336 | amdgpu_ring_pad_ib(ring, &job->ibs[0]); |
337 | r = amdgpu_job_submit(job, &adev->mman.entity, |
338 | AMDGPU_FENCE_OWNER_UNDEFINED, &fence); |
339 | if (r) |
340 | goto error_submit; |
341 | |
342 | mutex_unlock(&adev->mman.gtt_window_lock); |
343 | |
344 | dma_fence_wait(fence, false); |
345 | dma_fence_put(fence); |
346 | |
347 | return; |
348 | |
349 | error_submit: |
350 | amdgpu_job_free(job); |
351 | |
352 | error_alloc: |
353 | mutex_unlock(&adev->mman.gtt_window_lock); |
354 | DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r); |
355 | } |
356 | |
357 | /** |
358 | * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid |
359 | * |
360 | * @adev: amdgpu_device pointer |
361 | * @pasid: pasid to be flushed |
362 | * |
363 | * Flush the TLB for the requested pasid. |
364 | */ |
365 | static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, |
366 | uint16_t pasid, uint32_t flush_type, |
367 | bool all_hub) |
368 | { |
369 | int vmid, i; |
370 | signed long r; |
371 | uint32_t seq; |
372 | uint16_t queried_pasid; |
373 | bool ret; |
374 | struct amdgpu_ring *ring = &adev->gfx.kiq.ring; |
375 | struct amdgpu_kiq *kiq = &adev->gfx.kiq; |
376 | |
377 | if (amdgpu_emu_mode == 0 && ring->sched.ready) { |
378 | spin_lock(&adev->gfx.kiq.ring_lock); |
379 | /* 2 dwords flush + 8 dwords fence */ |
380 | amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); |
381 | kiq->pmf->kiq_invalidate_tlbs(ring, |
382 | pasid, flush_type, all_hub); |
383 | r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); |
384 | if (r) { |
385 | amdgpu_ring_undo(ring); |
386 | spin_unlock(&adev->gfx.kiq.ring_lock); |
387 | return -ETIME; |
388 | } |
389 | |
390 | amdgpu_ring_commit(ring); |
391 | spin_unlock(&adev->gfx.kiq.ring_lock); |
392 | r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); |
393 | if (r < 1) { |
394 | dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r)printf("drm:pid%d:%s *ERROR* " "wait for kiq fence error: %ld.\n" , ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->ps_pid, __func__ , r); |
395 | return -ETIME60; |
396 | } |
397 | |
398 | return 0; |
399 | } |
400 | |
401 | for (vmid = 1; vmid < 16; vmid++) { |
402 | |
403 | ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid, |
404 | &queried_pasid); |
405 | if (ret && queried_pasid == pasid) { |
406 | if (all_hub) { |
407 | for (i = 0; i < adev->num_vmhubs; i++) |
408 | gmc_v10_0_flush_gpu_tlb(adev, vmid, |
409 | i, flush_type); |
410 | } else { |
411 | gmc_v10_0_flush_gpu_tlb(adev, vmid, |
412 | AMDGPU_GFXHUB_0, flush_type); |
413 | } |
414 | break; |
415 | } |
416 | } |
417 | |
418 | return 0; |
419 | } |
420 | |
421 | static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, |
422 | unsigned vmid, uint64_t pd_addr) |
423 | { |
424 | bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub); |
425 | struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; |
426 | uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0); |
427 | unsigned eng = ring->vm_inv_eng; |
428 | |
429 | /* |
430 | * The GPUVM invalidate acknowledge state may be lost across a |
431 | * power-gating off cycle. Add a semaphore acquire before the |
432 | * invalidation and a semaphore release after it to avoid entering |
433 | * the power-gated state, as a workaround for the issue. |
434 | */ |
435 | |
436 | /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ |
437 | if (use_semaphore) |
438 | /* a read return value of 1 means semaphore acquire */ |
439 | amdgpu_ring_emit_reg_wait(ring, |
440 | hub->vm_inv_eng0_sem + |
441 | hub->eng_distance * eng, 0x1, 0x1); |
442 | |
443 | amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + |
444 | (hub->ctx_addr_distance * vmid), |
445 | lower_32_bits(pd_addr)); |
446 | |
447 | amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + |
448 | (hub->ctx_addr_distance * vmid), |
449 | upper_32_bits(pd_addr)); |
450 | |
451 | amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + |
452 | hub->eng_distance * eng, |
453 | hub->vm_inv_eng0_ack + |
454 | hub->eng_distance * eng, |
455 | req, 1 << vmid); |
456 | |
457 | /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ |
458 | if (use_semaphore) |
459 | /* |
460 | * add semaphore release after invalidation, |
461 | * write with 0 means semaphore release |
462 | */ |
463 | amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + |
464 | hub->eng_distance * eng, 0); |
465 | |
466 | return pd_addr; |
467 | } |
468 | |
469 | static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, |
470 | unsigned pasid) |
471 | { |
472 | struct amdgpu_device *adev = ring->adev; |
473 | uint32_t reg; |
474 | |
475 | if (ring->funcs->vmhub == AMDGPU_GFXHUB_0) |
476 | reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; |
477 | else |
478 | reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; |
479 | |
480 | amdgpu_ring_emit_wreg(ring, reg, pasid); |
481 | } |
482 | |
483 | /* |
484 | * PTE format on NAVI 10: |
485 | * 63:59 reserved |
486 | * 58:57 reserved |
487 | * 56 F |
488 | * 55 L |
489 | * 54 reserved |
490 | * 53:52 SW |
491 | * 51 T |
492 | * 50:48 mtype |
493 | * 47:12 4k physical page base address |
494 | * 11:7 fragment |
495 | * 6 write |
496 | * 5 read |
497 | * 4 exe |
498 | * 3 Z |
499 | * 2 snooped |
500 | * 1 system |
501 | * 0 valid |
502 | * |
503 | * PDE format on NAVI 10: |
504 | * 63:59 block fragment size |
505 | * 58:55 reserved |
506 | * 54 P |
507 | * 53:48 reserved |
508 | * 47:6 physical base address of PD or PTE |
509 | * 5:3 reserved |
510 | * 2 C |
511 | * 1 system |
512 | * 0 valid |
513 | */ |
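
Based on the PTE layout documented above, a packing sketch for illustration; the macro names here are hypothetical stand-ins, not the driver's own definitions:

    #include <stdint.h>

    #define NV10_PTE_VALID      (1ULL << 0)
    #define NV10_PTE_SYSTEM     (1ULL << 1)
    #define NV10_PTE_SNOOPED    (1ULL << 2)
    #define NV10_PTE_EXECUTABLE (1ULL << 4)
    #define NV10_PTE_READABLE   (1ULL << 5)
    #define NV10_PTE_WRITEABLE  (1ULL << 6)
    #define NV10_PTE_MTYPE(m)   ((uint64_t)(m) << 48)  /* bits 50:48 */
    #define NV10_PTE_ADDR_MASK  0x0000FFFFFFFFF000ULL  /* bits 47:12 */

    /* Build a valid, readable, writeable PTE for a 4k-aligned address. */
    static uint64_t nv10_pack_pte(uint64_t pa, uint64_t mtype)
    {
        return (pa & NV10_PTE_ADDR_MASK) | NV10_PTE_MTYPE(mtype) |
               NV10_PTE_READABLE | NV10_PTE_WRITEABLE | NV10_PTE_VALID;
    }
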
514 | |
515 | static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) |
516 | { |
517 | switch (flags) { |
518 | case AMDGPU_VM_MTYPE_DEFAULT: |
519 | return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); |
520 | case AMDGPU_VM_MTYPE_NC: |
521 | return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); |
522 | case AMDGPU_VM_MTYPE_WC: |
523 | return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); |
524 | case AMDGPU_VM_MTYPE_CC: |
525 | return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); |
526 | case AMDGPU_VM_MTYPE_UC: |
527 | return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); |
528 | default: |
529 | return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); |
530 | } |
531 | } |
532 | |
533 | static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, |
534 | uint64_t *addr, uint64_t *flags) |
535 | { |
536 | if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) |
537 | *addr = adev->vm_manager.vram_base_offset + *addr - |
538 | adev->gmc.vram_start; |
539 | BUG_ON(*addr & 0xFFFF00000000003FULL); |
540 | |
541 | if (!adev->gmc.translate_further) |
542 | return; |
543 | |
544 | if (level == AMDGPU_VM_PDB1) { |
545 | /* Set the block fragment size */ |
546 | if (!(*flags & AMDGPU_PDE_PTE)) |
547 | *flags |= AMDGPU_PDE_BFS(0x9); |
548 | |
549 | } else if (level == AMDGPU_VM_PDB0) { |
550 | if (*flags & AMDGPU_PDE_PTE) |
551 | *flags &= ~AMDGPU_PDE_PTE; |
552 | else |
553 | *flags |= AMDGPU_PTE_TF; |
554 | } |
555 | } |
556 | |
557 | static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, |
558 | struct amdgpu_bo_va_mapping *mapping, |
559 | uint64_t *flags) |
560 | { |
561 | *flags &= ~AMDGPU_PTE_EXECUTABLE; |
562 | *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; |
563 | |
564 | *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; |
565 | *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); |
566 | |
567 | if (mapping->flags & AMDGPU_PTE_PRT) { |
568 | *flags |= AMDGPU_PTE_PRT; |
569 | *flags |= AMDGPU_PTE_SNOOPED; |
570 | *flags |= AMDGPU_PTE_LOG; |
571 | *flags |= AMDGPU_PTE_SYSTEM; |
572 | *flags &= ~AMDGPU_PTE_VALID; |
573 | } |
574 | } |
575 | |
576 | static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) |
577 | { |
578 | u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); |
579 | unsigned size; |
580 | |
581 | if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { |
582 | size = AMDGPU_VBIOS_VGA_ALLOCATION; |
583 | } else { |
584 | u32 viewport; |
585 | u32 pitch; |
586 | |
587 | viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); |
588 | pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH); |
589 | size = (REG_GET_FIELD(viewport, |
590 | HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * |
591 | REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) * |
592 | 4); |
593 | } |
594 | |
595 | return size; |
596 | } |
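
The non-VGA branch above computes viewport height x surface pitch x 4 bytes per pixel. A worked example, assuming a 1920x1080 pre-OS framebuffer whose pitch equals its width:

    #include <stdio.h>

    int main(void)
    {
        unsigned int height = 1080, pitch = 1920; /* assumed scanout */
        unsigned int size = height * pitch * 4;   /* 4 bytes per pixel */

        /* 8294400 bytes, just under the 9 MB AMDGPU_VBIOS_VGA_ALLOCATION
         * fallback used when VGA mode is enabled. */
        printf("%u bytes\n", size);
        return 0;
    }
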
597 | |
598 | static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = { |
599 | .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb, |
600 | .flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid, |
601 | .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb, |
602 | .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping, |
603 | .map_mtype = gmc_v10_0_map_mtype, |
604 | .get_vm_pde = gmc_v10_0_get_vm_pde, |
605 | .get_vm_pte = gmc_v10_0_get_vm_pte, |
606 | .get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size, |
607 | }; |
608 | |
609 | static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) |
610 | { |
611 | if (adev->gmc.gmc_funcs == NULL) |
612 | adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs; |
613 | } |
614 | |
615 | static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) |
616 | { |
617 | switch (adev->asic_type) { |
618 | case CHIP_SIENNA_CICHLID: |
619 | adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM; |
620 | adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM; |
621 | adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM; |
622 | adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA; |
623 | adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0]; |
624 | adev->umc.funcs = &umc_v8_7_funcs; |
625 | break; |
626 | default: |
627 | break; |
628 | } |
629 | } |
630 | |
631 | |
632 | static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) |
633 | { |
634 | adev->mmhub.funcs = &mmhub_v2_0_funcs; |
635 | } |
636 | |
637 | static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) |
638 | { |
639 | switch (adev->asic_type) { |
640 | case CHIP_SIENNA_CICHLID: |
641 | case CHIP_NAVY_FLOUNDER: |
642 | adev->gfxhub.funcs = &gfxhub_v2_1_funcs; |
643 | break; |
644 | default: |
645 | adev->gfxhub.funcs = &gfxhub_v2_0_funcs; |
646 | break; |
647 | } |
648 | } |
649 | |
650 | |
651 | static int gmc_v10_0_early_init(void *handle) |
652 | { |
653 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
654 | |
655 | gmc_v10_0_set_mmhub_funcs(adev); |
656 | gmc_v10_0_set_gfxhub_funcs(adev); |
657 | gmc_v10_0_set_gmc_funcs(adev); |
658 | gmc_v10_0_set_irq_funcs(adev); |
659 | gmc_v10_0_set_umc_funcs(adev); |
660 | |
661 | adev->gmc.shared_aperture_start = 0x2000000000000000ULL; |
662 | adev->gmc.shared_aperture_end = |
663 | adev->gmc.shared_aperture_start + (4ULL << 30) - 1; |
664 | adev->gmc.private_aperture_start = 0x1000000000000000ULL; |
665 | adev->gmc.private_aperture_end = |
666 | adev->gmc.private_aperture_start + (4ULL << 30) - 1; |
667 | |
668 | return 0; |
669 | } |
670 | |
671 | static int gmc_v10_0_late_init(void *handle) |
672 | { |
673 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
674 | int r; |
675 | |
676 | amdgpu_bo_late_init(adev); |
677 | |
678 | r = amdgpu_gmc_allocate_vm_inv_eng(adev); |
679 | if (r) |
680 | return r; |
681 | |
682 | r = amdgpu_gmc_ras_late_init(adev); |
683 | if (r) |
684 | return r; |
685 | |
686 | return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); |
687 | } |
688 | |
689 | static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, |
690 | struct amdgpu_gmc *mc) |
691 | { |
692 | u64 base = 0; |
693 | |
694 | base = adev->gfxhub.funcs->get_fb_location(adev); |
695 | |
696 | /* add the xgmi offset of the physical node */ |
697 | base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; |
698 | |
699 | amdgpu_gmc_vram_location(adev, &adev->gmc, base); |
700 | amdgpu_gmc_gart_location(adev, mc); |
701 | |
702 | /* base offset of vram pages */ |
703 | adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); |
704 | |
705 | /* add the xgmi offset of the physical node */ |
706 | adev->vm_manager.vram_base_offset += |
707 | adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; |
708 | } |
709 | |
710 | /** |
711 | * gmc_v10_0_mc_init - initialize the memory controller driver params |
712 | * |
713 | * @adev: amdgpu_device pointer |
714 | * |
715 | * Look up the amount of vram, vram width, and decide how to place |
716 | * vram and gart within the GPU's physical address space. |
717 | * Returns 0 for success. |
718 | */ |
719 | static int gmc_v10_0_mc_init(struct amdgpu_device *adev) |
720 | { |
721 | int r; |
722 | |
723 | /* get_memsize() reports the VRAM size in MB */ |
724 | adev->gmc.mc_vram_size = |
725 | adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; |
726 | adev->gmc.real_vram_size = adev->gmc.mc_vram_size; |
727 | |
728 | if (!(adev->flags & AMD_IS_APU)) { |
729 | r = amdgpu_device_resize_fb_bar(adev); |
730 | if (r) |
731 | return r; |
732 | } |
733 | adev->gmc.aper_base = adev->fb_aper_offset; |
734 | adev->gmc.aper_size = adev->fb_aper_size; |
735 | |
736 | /* In case the PCI BAR is larger than the actual amount of vram */ |
737 | adev->gmc.visible_vram_size = adev->gmc.aper_size; |
738 | if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) |
739 | adev->gmc.visible_vram_size = adev->gmc.real_vram_size; |
740 | |
741 | /* set the gart size */ |
742 | if (amdgpu_gart_size == -1) { |
743 | switch (adev->asic_type) { |
744 | case CHIP_NAVI10: |
745 | case CHIP_NAVI14: |
746 | case CHIP_NAVI12: |
747 | case CHIP_SIENNA_CICHLID: |
748 | case CHIP_NAVY_FLOUNDER: |
749 | default: |
750 | adev->gmc.gart_size = 512ULL << 20; |
751 | break; |
752 | } |
753 | } else |
754 | adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; |
755 | |
756 | gmc_v10_0_vram_gtt_location(adev, &adev->gmc); |
757 | |
758 | return 0; |
759 | } |
760 | |
761 | static int gmc_v10_0_gart_init(struct amdgpu_device *adev) |
762 | { |
763 | int r; |
764 | |
765 | if (adev->gart.bo) { |
766 | WARN(1, "NAVI10 PCIE GART already initialized\n"); |
767 | return 0; |
768 | } |
769 | |
770 | /* Initialize common gart structure */ |
771 | r = amdgpu_gart_init(adev); |
772 | if (r) |
773 | return r; |
774 | |
775 | adev->gart.table_size = adev->gart.num_gpu_pages * 8; |
776 | adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) | |
777 | AMDGPU_PTE_EXECUTABLE; |
778 | |
779 | return amdgpu_gart_table_vram_alloc(adev); |
780 | } |
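
gmc_v10_0_gart_init() sizes the table at 8 bytes per GPU page, i.e. one 64-bit PTE per 4 KiB page. For the 512 MiB default GART chosen in gmc_v10_0_mc_init(), the arithmetic works out as follows:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t gart_size  = 512ULL << 20;    /* 512 MiB default */
        uint64_t num_pages  = gart_size >> 12; /* 131072 pages of 4 KiB */
        uint64_t table_size = num_pages * 8;   /* one 8-byte PTE per page */

        printf("%llu KiB of PTEs\n",
               (unsigned long long)(table_size >> 10)); /* 1024 KiB */
        return 0;
    }
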
781 | |
782 | static int gmc_v10_0_sw_init(void *handle) |
783 | { |
784 | int r, vram_width = 0, vram_type = 0, vram_vendor = 0; |
785 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
786 | |
787 | adev->gfxhub.funcs->init(adev); |
788 | |
789 | adev->mmhub.funcs->init(adev); |
790 | |
791 | mtx_init(&adev->gmc.invalidate_lock, IPL_NONE); |
792 | |
793 | if (adev->asic_type == CHIP_SIENNA_CICHLID && amdgpu_emu_mode == 1) { |
794 | adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6; |
795 | adev->gmc.vram_width = 1 * 128; /* numchan * chansize */ |
796 | } else { |
797 | r = amdgpu_atomfirmware_get_vram_info(adev, |
Value stored to 'r' is never read | |
798 | &vram_width, &vram_type, &vram_vendor); |
799 | adev->gmc.vram_width = vram_width; |
800 | |
801 | adev->gmc.vram_type = vram_type; |
802 | adev->gmc.vram_vendor = vram_vendor; |
803 | } |
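
This is the defect the report flags: the result of amdgpu_atomfirmware_get_vram_info() is stored in r but never examined, so a failed VBIOS query is silently ignored. A minimal sketch of one way to handle it, assuming the error should be propagated (illustrative only, not necessarily the fix adopted upstream):

    r = amdgpu_atomfirmware_get_vram_info(adev,
            &vram_width, &vram_type, &vram_vendor);
    if (r)
        return r; /* or log the failure and fall back to defaults */
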
804 | |
805 | switch (adev->asic_type) { |
806 | case CHIP_NAVI10: |
807 | case CHIP_NAVI14: |
808 | case CHIP_NAVI12: |
809 | case CHIP_SIENNA_CICHLID: |
810 | case CHIP_NAVY_FLOUNDER: |
811 | adev->num_vmhubs = 2; |
812 | /* |
813 | * To fulfill 4-level page table support, |
814 | * the VM size is 256 TB (48 bit), the maximum for Navi10/Navi14/Navi12, |
815 | * with a block size of 512 (9 bit). |
816 | */ |
817 | amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); |
818 | break; |
819 | default: |
820 | break; |
821 | } |
822 | |
823 | /* This interrupt is for VMC page faults. */ |
824 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, |
825 | VMC_1_0__SRCID__VM_FAULT0, |
826 | &adev->gmc.vm_fault); |
827 | |
828 | if (r) |
829 | return r; |
830 | |
831 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, |
832 | UTCL2_1_0__SRCID__FAULT0, |
833 | &adev->gmc.vm_fault); |
834 | if (r) |
835 | return r; |
836 | |
837 | if (!amdgpu_sriov_vf(adev)) { |
838 | /* interrupt sent to DF. */ |
839 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, |
840 | &adev->gmc.ecc_irq); |
841 | if (r) |
842 | return r; |
843 | } |
844 | |
845 | /* |
846 | * Set the internal MC address mask. This is the max address of the GPU's |
847 | * internal address space. |
848 | */ |
849 | adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ |
850 | |
851 | r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); |
852 | if (r) { |
853 | printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); |
854 | return r; |
855 | } |
856 | |
857 | if (adev->gmc.xgmi.supported) { |
858 | r = adev->gfxhub.funcs->get_xgmi_info(adev); |
859 | if (r) |
860 | return r; |
861 | } |
862 | |
863 | r = gmc_v10_0_mc_init(adev); |
864 | if (r) |
865 | return r; |
866 | |
867 | amdgpu_gmc_get_vbios_allocations(adev); |
868 | |
869 | /* Memory manager */ |
870 | r = amdgpu_bo_init(adev); |
871 | if (r) |
872 | return r; |
873 | |
874 | r = gmc_v10_0_gart_init(adev); |
875 | if (r) |
876 | return r; |
877 | |
878 | /* |
879 | * number of VMs |
880 | * VMID 0 is reserved for System |
881 | * amdgpu graphics/compute will use VMIDs 1-7 |
882 | * amdkfd will use VMIDs 8-15 |
883 | */ |
884 | adev->vm_manager.first_kfd_vmid = 8; |
885 | |
886 | amdgpu_vm_manager_init(adev); |
887 | |
888 | return 0; |
889 | } |
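
The arguments to amdgpu_vm_adjust_size() above follow from the sizing comment: a 48-bit VA space spans 2^48 bytes = 256 TB, passed as 256 * 1024 (in GB), and a 9-bit block size selects 2^9 = 512 PTEs per block. A quick check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t vm_bytes = 1ULL << 48;       /* 48-bit VA space */
        uint64_t vm_gb    = vm_bytes >> 30;   /* 262144 = 256 * 1024 */
        unsigned int block_entries = 1u << 9; /* 512 PTEs per block */

        printf("%llu GB, %u entries per block\n",
               (unsigned long long)vm_gb, block_entries);
        return 0;
    }
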
890 | |
891 | /** |
892 | * gmc_v10_0_gart_fini - vm fini callback |
893 | * |
894 | * @adev: amdgpu_device pointer |
895 | * |
896 | * Tears down the driver GART/VM setup (Navi). |
897 | */ |
898 | static void gmc_v10_0_gart_fini(struct amdgpu_device *adev) |
899 | { |
900 | amdgpu_gart_table_vram_free(adev); |
901 | amdgpu_gart_fini(adev); |
902 | } |
903 | |
904 | static int gmc_v10_0_sw_fini(void *handle) |
905 | { |
906 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
907 | |
908 | amdgpu_vm_manager_fini(adev); |
909 | gmc_v10_0_gart_fini(adev); |
910 | amdgpu_gem_force_release(adev); |
911 | amdgpu_bo_fini(adev); |
912 | |
913 | return 0; |
914 | } |
915 | |
916 | static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev) |
917 | { |
918 | switch (adev->asic_type) { |
919 | case CHIP_NAVI10: |
920 | case CHIP_NAVI14: |
921 | case CHIP_NAVI12: |
922 | case CHIP_SIENNA_CICHLID: |
923 | case CHIP_NAVY_FLOUNDER: |
924 | break; |
925 | default: |
926 | break; |
927 | } |
928 | } |
929 | |
930 | /** |
931 | * gmc_v10_0_gart_enable - gart enable |
932 | * |
933 | * @adev: amdgpu_device pointer |
934 | */ |
935 | static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) |
936 | { |
937 | int r; |
938 | bool value; |
939 | u32 tmp; |
940 | |
941 | if (adev->gart.bo == NULL) { |
942 | dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); |
943 | return -EINVAL; |
944 | } |
945 | |
946 | r = amdgpu_gart_table_vram_pin(adev); |
947 | if (r) |
948 | return r; |
949 | |
950 | r = adev->gfxhub.funcs->gart_enable(adev); |
951 | if (r) |
952 | return r; |
953 | |
954 | r = adev->mmhub.funcs->gart_enable(adev); |
955 | if (r) |
956 | return r; |
957 | |
958 | tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL); |
959 | tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK; |
960 | WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp); |
961 | |
962 | tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); |
963 | WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); |
964 | |
965 | /* Flush HDP after it is initialized */ |
966 | adev->nbio.funcs->hdp_flush(adev, NULL); |
967 | |
968 | value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? |
969 | false : true; |
970 | |
971 | adev->gfxhub.funcs->set_fault_enable_default(adev, value); |
972 | adev->mmhub.funcs->set_fault_enable_default(adev, value); |
973 | gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); |
974 | gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); |
975 | |
976 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
977 | (unsigned)(adev->gmc.gart_size >> 20), |
978 | (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); |
979 | |
980 | adev->gart.ready = true; |
981 | |
982 | return 0; |
983 | } |
984 | |
985 | static int gmc_v10_0_hw_init(void *handle) |
986 | { |
987 | int r; |
988 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
989 | |
990 | /* The sequence of these two function calls matters. */ |
991 | gmc_v10_0_init_golden_registers(adev); |
992 | |
993 | r = gmc_v10_0_gart_enable(adev); |
994 | if (r) |
995 | return r; |
996 | |
997 | if (adev->umc.funcs && adev->umc.funcs->init_registers) |
998 | adev->umc.funcs->init_registers(adev); |
999 | |
1000 | return 0; |
1001 | } |
1002 | |
1003 | /** |
1004 | * gmc_v10_0_gart_disable - gart disable |
1005 | * |
1006 | * @adev: amdgpu_device pointer |
1007 | * |
1008 | * This disables all VM page tables. |
1009 | */ |
1010 | static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) |
1011 | { |
1012 | adev->gfxhub.funcs->gart_disable(adev); |
1013 | adev->mmhub.funcs->gart_disable(adev); |
1014 | amdgpu_gart_table_vram_unpin(adev); |
1015 | } |
1016 | |
1017 | static int gmc_v10_0_hw_fini(void *handle) |
1018 | { |
1019 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1020 | |
1021 | gmc_v10_0_gart_disable(adev); |
1022 | |
1023 | if (amdgpu_sriov_vf(adev)) { |
1024 | /* full access mode, so don't touch any GMC register */ |
1025 | DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); |
1026 | return 0; |
1027 | } |
1028 | |
1029 | amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); |
1030 | amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); |
1031 | |
1032 | return 0; |
1033 | } |
1034 | |
1035 | static int gmc_v10_0_suspend(void *handle) |
1036 | { |
1037 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1038 | |
1039 | gmc_v10_0_hw_fini(adev); |
1040 | |
1041 | return 0; |
1042 | } |
1043 | |
1044 | static int gmc_v10_0_resume(void *handle) |
1045 | { |
1046 | int r; |
1047 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1048 | |
1049 | r = gmc_v10_0_hw_init(adev); |
1050 | if (r) |
1051 | return r; |
1052 | |
1053 | amdgpu_vmid_reset_all(adev); |
1054 | |
1055 | return 0; |
1056 | } |
1057 | |
1058 | static bool gmc_v10_0_is_idle(void *handle) |
1059 | { |
1060 | /* MC is always ready in GMC v10. */ |
1061 | return true; |
1062 | } |
1063 | |
1064 | static int gmc_v10_0_wait_for_idle(void *handle) |
1065 | { |
1066 | /* There is no need to wait for MC idle in GMC v10. */ |
1067 | return 0; |
1068 | } |
1069 | |
1070 | static int gmc_v10_0_soft_reset(void *handle) |
1071 | { |
1072 | return 0; |
1073 | } |
1074 | |
1075 | static int gmc_v10_0_set_clockgating_state(void *handle, |
1076 | enum amd_clockgating_state state) |
1077 | { |
1078 | int r; |
1079 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1080 | |
1081 | r = adev->mmhub.funcs->set_clockgating(adev, state); |
1082 | if (r) |
1083 | return r; |
1084 | |
1085 | if (adev->asic_type == CHIP_SIENNA_CICHLID || |
1086 | adev->asic_type == CHIP_NAVY_FLOUNDER) |
1087 | return athub_v2_1_set_clockgating(adev, state); |
1088 | else |
1089 | return athub_v2_0_set_clockgating(adev, state); |
1090 | } |
1091 | |
1092 | static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags) |
1093 | { |
1094 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1095 | |
1096 | adev->mmhub.funcs->get_clockgating(adev, flags); |
1097 | |
1098 | if (adev->asic_type == CHIP_SIENNA_CICHLID || |
1099 | adev->asic_type == CHIP_NAVY_FLOUNDER) |
1100 | athub_v2_1_get_clockgating(adev, flags); |
1101 | else |
1102 | athub_v2_0_get_clockgating(adev, flags); |
1103 | } |
1104 | |
1105 | static int gmc_v10_0_set_powergating_state(void *handle, |
1106 | enum amd_powergating_state state) |
1107 | { |
1108 | return 0; |
1109 | } |
1110 | |
1111 | const struct amd_ip_funcs gmc_v10_0_ip_funcs = { |
1112 | .name = "gmc_v10_0", |
1113 | .early_init = gmc_v10_0_early_init, |
1114 | .late_init = gmc_v10_0_late_init, |
1115 | .sw_init = gmc_v10_0_sw_init, |
1116 | .sw_fini = gmc_v10_0_sw_fini, |
1117 | .hw_init = gmc_v10_0_hw_init, |
1118 | .hw_fini = gmc_v10_0_hw_fini, |
1119 | .suspend = gmc_v10_0_suspend, |
1120 | .resume = gmc_v10_0_resume, |
1121 | .is_idle = gmc_v10_0_is_idle, |
1122 | .wait_for_idle = gmc_v10_0_wait_for_idle, |
1123 | .soft_reset = gmc_v10_0_soft_reset, |
1124 | .set_clockgating_state = gmc_v10_0_set_clockgating_state, |
1125 | .set_powergating_state = gmc_v10_0_set_powergating_state, |
1126 | .get_clockgating_state = gmc_v10_0_get_clockgating_state, |
1127 | }; |
1128 | |
1129 | const struct amdgpu_ip_block_version gmc_v10_0_ip_block = |
1130 | { |
1131 | .type = AMD_IP_BLOCK_TYPE_GMC, |
1132 | .major = 10, |
1133 | .minor = 0, |
1134 | .rev = 0, |
1135 | .funcs = &gmc_v10_0_ip_funcs, |
1136 | }; |