File: | dev/pci/drm/amd/amdgpu/sdma_v2_4.c |
Warning: | line 391, column 14: The right operand of '+' is a garbage value due to array index out of bounds |
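Note: | The flagged '+' is in sdma_v2_4_enable() at line 391: mmSDMA0_F32_CNTL + sdma_offsets[i]. sdma_offsets[] has SDMA_MAX_INSTANCE (2) entries, and the analyzer assumes adev->sdma.num_instances may exceed that bound, which would make sdma_offsets[i] an out-of-bounds (garbage) read. sdma_v2_4_early_init() sets num_instances to SDMA_MAX_INSTANCE, so within this file the index appears to stay in range. |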
1 | /* | ||||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||||
3 | * | ||||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||||
5 | * copy of this software and associated documentation files (the "Software"), | ||||
6 | * to deal in the Software without restriction, including without limitation | ||||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||||
9 | * Software is furnished to do so, subject to the following conditions: | ||||
10 | * | ||||
11 | * The above copyright notice and this permission notice shall be included in | ||||
12 | * all copies or substantial portions of the Software. | ||||
13 | * | ||||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||||
21 | * | ||||
22 | * Authors: Alex Deucher | ||||
23 | */ | ||||
24 | |||||
25 | #include <linux/delay.h> | ||||
26 | #include <linux/firmware.h> | ||||
27 | #include <linux/module.h> | ||||
28 | |||||
29 | #include "amdgpu.h" | ||||
30 | #include "amdgpu_ucode.h" | ||||
31 | #include "amdgpu_trace.h" | ||||
32 | #include "vi.h" | ||||
33 | #include "vid.h" | ||||
34 | |||||
35 | #include "oss/oss_2_4_d.h" | ||||
36 | #include "oss/oss_2_4_sh_mask.h" | ||||
37 | |||||
38 | #include "gmc/gmc_7_1_d.h" | ||||
39 | #include "gmc/gmc_7_1_sh_mask.h" | ||||
40 | |||||
41 | #include "gca/gfx_8_0_d.h" | ||||
42 | #include "gca/gfx_8_0_enum.h" | ||||
43 | #include "gca/gfx_8_0_sh_mask.h" | ||||
44 | |||||
45 | #include "bif/bif_5_0_d.h" | ||||
46 | #include "bif/bif_5_0_sh_mask.h" | ||||
47 | |||||
48 | #include "iceland_sdma_pkt_open.h" | ||||
49 | |||||
50 | #include "ivsrcid/ivsrcid_vislands30.h" | ||||
51 | |||||
52 | static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev); | ||||
53 | static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev); | ||||
54 | static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev); | ||||
55 | static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev); | ||||
56 | |||||
57 | MODULE_FIRMWARE("amdgpu/topaz_sdma.bin"); | ||||
58 | MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin"); | ||||
59 | |||||
60 | static const u32 sdma_offsets[SDMA_MAX_INSTANCE2] = | ||||
61 | { | ||||
62 | SDMA0_REGISTER_OFFSET0x0, | ||||
63 | SDMA1_REGISTER_OFFSET0x200 | ||||
64 | }; | ||||
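 | /* indexed by SDMA instance (loop index i or ring->me); every sdma_offsets[] access in this file assumes the index is below SDMA_MAX_INSTANCE. This is the array behind the line-391 report above. */ | ||||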
65 | |||||
66 | static const u32 golden_settings_iceland_a11[] = | ||||
67 | { | ||||
68 | mmSDMA0_CHICKEN_BITS0x3405, 0xfc910007, 0x00810007, | ||||
69 | mmSDMA0_CLK_CTRL0x3403, 0xff000fff, 0x00000000, | ||||
70 | mmSDMA1_CHICKEN_BITS0x3605, 0xfc910007, 0x00810007, | ||||
71 | mmSDMA1_CLK_CTRL0x3603, 0xff000fff, 0x00000000, | ||||
72 | }; | ||||
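 | /* golden-register tables are consumed three entries at a time by amdgpu_device_program_register_sequence(): {register offset, mask of bits to change, value to set} per read-modify-write, so ARRAY_SIZE must be a multiple of 3. */ | ||||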
73 | |||||
74 | static const u32 iceland_mgcg_cgcg_init[] = | ||||
75 | { | ||||
76 | mmSDMA0_CLK_CTRL0x3403, 0xff000ff0, 0x00000100, | ||||
77 | mmSDMA1_CLK_CTRL0x3603, 0xff000ff0, 0x00000100 | ||||
78 | }; | ||||
79 | |||||
80 | /* | ||||
81 | * sDMA - System DMA | ||||
82 | * Starting with CIK, the GPU has new asynchronous | ||||
83 | * DMA engines. These engines are used for compute | ||||
84 | * and gfx. There are two DMA engines (SDMA0, SDMA1) | ||||
85 | * and each one supports 1 ring buffer used for gfx | ||||
86 | * and 2 queues used for compute. | ||||
87 | * | ||||
88 | * The programming model is very similar to the CP | ||||
89 | * (ring buffer, IBs, etc.), but sDMA has its own | ||||
90 | * packet format that is different from the PM4 format | ||||
91 | * used by the CP. sDMA supports copying data, writing | ||||
92 | * embedded data, solid fills, and a number of other | ||||
93 | * things. It also has support for tiling/detiling of | ||||
94 | * buffers. | ||||
95 | */ | ||||
96 | |||||
97 | static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) | ||||
98 | { | ||||
99 | switch (adev->asic_type) { | ||||
100 | case CHIP_TOPAZ: | ||||
101 | amdgpu_device_program_register_sequence(adev, | ||||
102 | iceland_mgcg_cgcg_init, | ||||
103 | ARRAY_SIZE(iceland_mgcg_cgcg_init)(sizeof((iceland_mgcg_cgcg_init)) / sizeof((iceland_mgcg_cgcg_init )[0]))); | ||||
104 | amdgpu_device_program_register_sequence(adev, | ||||
105 | golden_settings_iceland_a11, | ||||
106 | ARRAY_SIZE(golden_settings_iceland_a11)(sizeof((golden_settings_iceland_a11)) / sizeof((golden_settings_iceland_a11 )[0]))); | ||||
107 | break; | ||||
108 | default: | ||||
109 | break; | ||||
110 | } | ||||
111 | } | ||||
112 | |||||
113 | static void sdma_v2_4_free_microcode(struct amdgpu_device *adev) | ||||
114 | { | ||||
115 | int i; | ||||
116 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||||
117 | release_firmware(adev->sdma.instance[i].fw); | ||||
118 | adev->sdma.instance[i].fw = NULL((void *)0); | ||||
119 | } | ||||
120 | } | ||||
121 | |||||
122 | /** | ||||
123 | * sdma_v2_4_init_microcode - load ucode images from disk | ||||
124 | * | ||||
125 | * @adev: amdgpu_device pointer | ||||
126 | * | ||||
127 | * Use the firmware interface to load the ucode images into | ||||
128 | * the driver (not loaded into hw). | ||||
129 | * Returns 0 on success, error on failure. | ||||
130 | */ | ||||
131 | static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) | ||||
132 | { | ||||
133 | const char *chip_name; | ||||
134 | char fw_name[30]; | ||||
135 | int err = 0, i; | ||||
136 | struct amdgpu_firmware_info *info = NULL((void *)0); | ||||
137 | const struct common_firmware_header *header = NULL((void *)0); | ||||
138 | const struct sdma_firmware_header_v1_0 *hdr; | ||||
139 | |||||
140 | DRM_DEBUG("\n")___drm_dbg(((void *)0), DRM_UT_CORE, "\n"); | ||||
141 | |||||
142 | switch (adev->asic_type) { | ||||
143 | case CHIP_TOPAZ: | ||||
144 | chip_name = "topaz"; | ||||
145 | break; | ||||
146 | default: BUG()do { panic("BUG at %s:%d", "/usr/src/sys/dev/pci/drm/amd/amdgpu/sdma_v2_4.c" , 146); } while (0); | ||||
147 | } | ||||
148 | |||||
149 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||||
150 | if (i == 0) | ||||
151 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); | ||||
152 | else | ||||
153 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); | ||||
154 | err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); | ||||
155 | if (err) | ||||
156 | goto out; | ||||
157 | err = amdgpu_ucode_validate(adev->sdma.instance[i].fw); | ||||
158 | if (err) | ||||
159 | goto out; | ||||
160 | hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; | ||||
161 | adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version)((__uint32_t)(hdr->header.ucode_version)); | ||||
162 | adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version)((__uint32_t)(hdr->ucode_feature_version)); | ||||
163 | if (adev->sdma.instance[i].feature_version >= 20) | ||||
164 | adev->sdma.instance[i].burst_nop = true1; | ||||
165 | |||||
166 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) { | ||||
167 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; | ||||
168 | info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; | ||||
169 | info->fw = adev->sdma.instance[i].fw; | ||||
170 | header = (const struct common_firmware_header *)info->fw->data; | ||||
171 | adev->firmware.fw_size += | ||||
172 | roundup2(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE)(((((__uint32_t)(header->ucode_size_bytes))) + (((1 << 12)) - 1)) & (~((__typeof(((__uint32_t)(header->ucode_size_bytes ))))((1 << 12)) - 1))); | ||||
173 | } | ||||
174 | } | ||||
175 | |||||
176 | out: | ||||
177 | if (err) { | ||||
178 | pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name)printk("\0013" "amdgpu: " "sdma_v2_4: Failed to load firmware \"%s\"\n" , fw_name); | ||||
179 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||||
180 | release_firmware(adev->sdma.instance[i].fw); | ||||
181 | adev->sdma.instance[i].fw = NULL((void *)0); | ||||
182 | } | ||||
183 | } | ||||
184 | return err; | ||||
185 | } | ||||
186 | |||||
187 | /** | ||||
188 | * sdma_v2_4_ring_get_rptr - get the current read pointer | ||||
189 | * | ||||
190 | * @ring: amdgpu ring pointer | ||||
191 | * | ||||
192 | * Get the current rptr from the hardware (VI+). | ||||
193 | */ | ||||
194 | static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring) | ||||
195 | { | ||||
196 | /* XXX check if swapping is necessary on BE */ | ||||
197 | return *ring->rptr_cpu_addr >> 2; | ||||
198 | } | ||||
199 | |||||
200 | /** | ||||
201 | * sdma_v2_4_ring_get_wptr - get the current write pointer | ||||
202 | * | ||||
203 | * @ring: amdgpu ring pointer | ||||
204 | * | ||||
205 | * Get the current wptr from the hardware (VI+). | ||||
206 | */ | ||||
207 | static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring) | ||||
208 | { | ||||
209 | struct amdgpu_device *adev = ring->adev; | ||||
210 | u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me])amdgpu_device_rreg(adev, (0x3484 + sdma_offsets[ring->me]) , 0) >> 2; | ||||
211 | |||||
212 | return wptr; | ||||
213 | } | ||||
214 | |||||
215 | /** | ||||
216 | * sdma_v2_4_ring_set_wptr - commit the write pointer | ||||
217 | * | ||||
218 | * @ring: amdgpu ring pointer | ||||
219 | * | ||||
220 | * Write the wptr back to the hardware (VI+). | ||||
221 | */ | ||||
222 | static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) | ||||
223 | { | ||||
224 | struct amdgpu_device *adev = ring->adev; | ||||
225 | |||||
226 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], ring->wptr << 2)amdgpu_device_wreg(adev, (0x3484 + sdma_offsets[ring->me]) , (ring->wptr << 2), 0); | ||||
227 | } | ||||
228 | |||||
229 | static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | ||||
230 | { | ||||
231 | struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); | ||||
232 | int i; | ||||
233 | |||||
234 | for (i = 0; i < count; i++) | ||||
235 | if (sdma && sdma->burst_nop && (i == 0)) | ||||
236 | amdgpu_ring_write(ring, ring->funcs->nop | | ||||
237 | SDMA_PKT_NOP_HEADER_COUNT(count - 1)(((count - 1) & 0x00003FFF) << 16)); | ||||
238 | else | ||||
239 | amdgpu_ring_write(ring, ring->funcs->nop); | ||||
240 | } | ||||
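 | /* burst NOP: the first dword is a NOP header whose COUNT field announces the following (count - 1) dwords as payload of the same packet, letting the engine skip them in one step; without burst_nop each dword is an independent NOP. */ | ||||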
241 | |||||
242 | /** | ||||
243 | * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine | ||||
244 | * | ||||
245 | * @ring: amdgpu ring pointer | ||||
246 | * @job: job to retrieve vmid from | ||||
247 | * @ib: IB object to schedule | ||||
248 | * @flags: unused | ||||
249 | * | ||||
250 | * Schedule an IB in the DMA ring (VI). | ||||
251 | */ | ||||
252 | static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, | ||||
253 | struct amdgpu_job *job, | ||||
254 | struct amdgpu_ib *ib, | ||||
255 | uint32_t flags) | ||||
256 | { | ||||
257 | unsigned vmid = AMDGPU_JOB_GET_VMID(job)((job) ? (job)->vmid : 0); | ||||
258 | |||||
259 | /* IB packet must end on a 8 DW boundary */ | ||||
260 | sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)((u32)(ring->wptr))) & 7); | ||||
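 | /* pads wptr so wptr % 8 == 2; the INDIRECT packet below is 6 dwords, so it then ends exactly on an 8-dword boundary. */ | ||||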
261 | |||||
262 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT)(((4) & 0x000000FF) << 0) | | ||||
263 | SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)(((vmid & 0xf) & 0x0000000F) << 16)); | ||||
264 | /* base must be 32 byte aligned */ | ||||
265 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)((u32)(ib->gpu_addr)) & 0xffffffe0); | ||||
266 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)((u32)(((ib->gpu_addr) >> 16) >> 16))); | ||||
267 | amdgpu_ring_write(ring, ib->length_dw); | ||||
268 | amdgpu_ring_write(ring, 0); | ||||
269 | amdgpu_ring_write(ring, 0); | ||||
270 | |||||
271 | } | ||||
272 | |||||
273 | /** | ||||
274 | * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring | ||||
275 | * | ||||
276 | * @ring: amdgpu ring pointer | ||||
277 | * | ||||
278 | * Emit an hdp flush packet on the requested DMA ring. | ||||
279 | */ | ||||
280 | static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring) | ||||
281 | { | ||||
282 | u32 ref_and_mask = 0; | ||||
283 | |||||
284 | if (ring->me == 0) | ||||
285 | ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1)(((ref_and_mask) & ~0x400) | (0x400 & ((1) << 0xa ))); | ||||
286 | else | ||||
287 | ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1)(((ref_and_mask) & ~0x800) | (0x800 & ((1) << 0xb ))); | ||||
288 | |||||
289 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM)(((8) & 0x000000FF) << 0) | | ||||
290 | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1)(((1) & 0x00000001) << 26) | | ||||
291 | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)(((3) & 0x00000007) << 28)); /* == */ | ||||
292 | amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE0x1538 << 2); | ||||
293 | amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ0x1537 << 2); | ||||
294 | amdgpu_ring_write(ring, ref_and_mask); /* reference */ | ||||
295 | amdgpu_ring_write(ring, ref_and_mask); /* mask */ | ||||
296 | amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff)(((0xfff) & 0x00000FFF) << 16) | | ||||
297 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)(((10) & 0x0000FFFF) << 0)); /* retry count, poll interval */ | ||||
298 | } | ||||
299 | |||||
300 | /** | ||||
301 | * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring | ||||
302 | * | ||||
303 | * @ring: amdgpu ring pointer | ||||
304 | * @addr: address | ||||
305 | * @seq: sequence number | ||||
306 | * @flags: fence related flags | ||||
307 | * | ||||
308 | * Add a DMA fence packet to the ring to write | ||||
309 | * the fence seq number and DMA trap packet to generate | ||||
310 | * an interrupt if needed (VI). | ||||
311 | */ | ||||
312 | static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | ||||
313 | unsigned flags) | ||||
314 | { | ||||
315 | bool_Bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT(1 << 0); | ||||
316 | /* write the fence */ | ||||
317 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)(((5) & 0x000000FF) << 0)); | ||||
318 | amdgpu_ring_write(ring, lower_32_bits(addr)((u32)(addr))); | ||||
319 | amdgpu_ring_write(ring, upper_32_bits(addr)((u32)(((addr) >> 16) >> 16))); | ||||
320 | amdgpu_ring_write(ring, lower_32_bits(seq)((u32)(seq))); | ||||
321 | |||||
322 | /* optionally write high bits as well */ | ||||
323 | if (write64bit) { | ||||
324 | addr += 4; | ||||
325 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)(((5) & 0x000000FF) << 0)); | ||||
326 | amdgpu_ring_write(ring, lower_32_bits(addr)((u32)(addr))); | ||||
327 | amdgpu_ring_write(ring, upper_32_bits(addr)((u32)(((addr) >> 16) >> 16))); | ||||
328 | amdgpu_ring_write(ring, upper_32_bits(seq)((u32)(((seq) >> 16) >> 16))); | ||||
329 | } | ||||
330 | |||||
331 | /* generate an interrupt */ | ||||
332 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)(((6) & 0x000000FF) << 0)); | ||||
333 | amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)(((0) & 0x0FFFFFFF) << 0)); | ||||
334 | } | ||||
335 | |||||
336 | /** | ||||
337 | * sdma_v2_4_gfx_stop - stop the gfx async dma engines | ||||
338 | * | ||||
339 | * @adev: amdgpu_device pointer | ||||
340 | * | ||||
341 | * Stop the gfx async dma ring buffers (VI). | ||||
342 | */ | ||||
343 | static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) | ||||
344 | { | ||||
345 | u32 rb_cntl, ib_cntl; | ||||
346 | int i; | ||||
347 | |||||
348 | amdgpu_sdma_unset_buffer_funcs_helper(adev); | ||||
349 | |||||
350 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||||
351 | rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])amdgpu_device_rreg(adev, (0x3480 + sdma_offsets[i]), 0); | ||||
352 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0)(((rb_cntl) & ~0x1) | (0x1 & ((0) << 0x0))); | ||||
353 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl)amdgpu_device_wreg(adev, (0x3480 + sdma_offsets[i]), (rb_cntl ), 0); | ||||
354 | ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])amdgpu_device_rreg(adev, (0x348a + sdma_offsets[i]), 0); | ||||
355 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0)(((ib_cntl) & ~0x1) | (0x1 & ((0) << 0x0))); | ||||
356 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl)amdgpu_device_wreg(adev, (0x348a + sdma_offsets[i]), (ib_cntl ), 0); | ||||
357 | } | ||||
358 | } | ||||
359 | |||||
360 | /** | ||||
361 | * sdma_v2_4_rlc_stop - stop the compute async dma engines | ||||
362 | * | ||||
363 | * @adev: amdgpu_device pointer | ||||
364 | * | ||||
365 | * Stop the compute async dma queues (VI). | ||||
366 | */ | ||||
367 | static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev) | ||||
368 | { | ||||
369 | /* XXX todo */ | ||||
370 | } | ||||
371 | |||||
372 | /** | ||||
373 | * sdma_v2_4_enable - stop the async dma engines | ||||
374 | * | ||||
375 | * @adev: amdgpu_device pointer | ||||
376 | * @enable: enable/disable the DMA MEs. | ||||
377 | * | ||||
378 | * Halt or unhalt the async dma engines (VI). | ||||
379 | */ | ||||
380 | static void sdma_v2_4_enable(struct amdgpu_device *adev, bool_Bool enable) | ||||
381 | { | ||||
382 | u32 f32_cntl; | ||||
383 | int i; | ||||
384 | |||||
385 | if (!enable) { | ||||
386 | sdma_v2_4_gfx_stop(adev); | ||||
387 | sdma_v2_4_rlc_stop(adev); | ||||
388 | } | ||||
389 | |||||
390 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||||
391 | f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i])amdgpu_device_rreg(adev, (0x3412 + sdma_offsets[i]), 0); | ||||
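 | /* <-- line 391, column 14: the '+' flagged in the header. i < adev->sdma.num_instances here, and num_instances is set to SDMA_MAX_INSTANCE in sdma_v2_4_early_init(), so sdma_offsets[i] stays in bounds on every path through this file. */ | ||||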
392 | if (enable) | ||||
393 | f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0)(((f32_cntl) & ~0x1) | (0x1 & ((0) << 0x0))); | ||||
394 | else | ||||
395 | f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1)(((f32_cntl) & ~0x1) | (0x1 & ((1) << 0x0))); | ||||
396 | WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl)amdgpu_device_wreg(adev, (0x3412 + sdma_offsets[i]), (f32_cntl ), 0); | ||||
397 | } | ||||
398 | } | ||||
399 | |||||
400 | /** | ||||
401 | * sdma_v2_4_gfx_resume - setup and start the async dma engines | ||||
402 | * | ||||
403 | * @adev: amdgpu_device pointer | ||||
404 | * | ||||
405 | * Set up the gfx DMA ring buffers and enable them (VI). | ||||
406 | * Returns 0 for success, error for failure. | ||||
407 | */ | ||||
408 | static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | ||||
409 | { | ||||
410 | struct amdgpu_ring *ring; | ||||
411 | u32 rb_cntl, ib_cntl; | ||||
412 | u32 rb_bufsz; | ||||
413 | int i, j, r; | ||||
414 | |||||
415 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||||
416 | ring = &adev->sdma.instance[i].ring; | ||||
417 | |||||
418 | mutex_lock(&adev->srbm_mutex)rw_enter_write(&adev->srbm_mutex); | ||||
419 | for (j = 0; j < 16; j++) { | ||||
420 | vi_srbm_select(adev, 0, 0, 0, j); | ||||
421 | /* SDMA GFX */ | ||||
422 | WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0)amdgpu_device_wreg(adev, (0x34a7 + sdma_offsets[i]), (0), 0); | ||||
423 | WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0)amdgpu_device_wreg(adev, (0x34a8 + sdma_offsets[i]), (0), 0); | ||||
424 | } | ||||
425 | vi_srbm_select(adev, 0, 0, 0, 0); | ||||
426 | mutex_unlock(&adev->srbm_mutex)rw_exit_write(&adev->srbm_mutex); | ||||
427 | |||||
428 | WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i], | ||||
429 | adev->gfx.config.gb_addr_config & 0x70)amdgpu_device_wreg(adev, (0x3406 + sdma_offsets[i]), (adev->gfx.config.gb_addr_config & 0x70), 0); | ||||
430 | |||||
431 | WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0)amdgpu_device_wreg(adev, (0x3409 + sdma_offsets[i]), (0), 0); | ||||
432 | |||||
433 | /* Set ring buffer size in dwords */ | ||||
434 | rb_bufsz = order_base_2(ring->ring_size / 4)drm_order(ring->ring_size / 4); | ||||
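 | /* RB_SIZE takes the ring size as a log2 dword count; order_base_2(ring_size / 4) converts the byte size to that exponent. */ | ||||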
435 | rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])amdgpu_device_rreg(adev, (0x3480 + sdma_offsets[i]), 0); | ||||
436 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz)(((rb_cntl) & ~0x3e) | (0x3e & ((rb_bufsz) << 0x1 ))); | ||||
437 | #ifdef __BIG_ENDIAN | ||||
438 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1)(((rb_cntl) & ~0x200) | (0x200 & ((1) << 0x9))); | ||||
439 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, | ||||
440 | RPTR_WRITEBACK_SWAP_ENABLE, 1)(((rb_cntl) & ~0x2000) | (0x2000 & ((1) << 0xd))); | ||||
441 | #endif | ||||
442 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl)amdgpu_device_wreg(adev, (0x3480 + sdma_offsets[i]), (rb_cntl ), 0); | ||||
443 | |||||
444 | /* Initialize the ring buffer's read and write pointers */ | ||||
445 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0)amdgpu_device_wreg(adev, (0x3483 + sdma_offsets[i]), (0), 0); | ||||
446 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0)amdgpu_device_wreg(adev, (0x3484 + sdma_offsets[i]), (0), 0); | ||||
447 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0)amdgpu_device_wreg(adev, (0x348b + sdma_offsets[i]), (0), 0); | ||||
448 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0)amdgpu_device_wreg(adev, (0x348c + sdma_offsets[i]), (0), 0); | ||||
449 | |||||
450 | /* set the wb address whether it's enabled or not */ | ||||
451 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | ||||
452 | upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF)amdgpu_device_wreg(adev, (0x3488 + sdma_offsets[i]), (((u32)(((ring->rptr_gpu_addr) >> 16) >> 16)) & 0xFFFFFFFF), 0); | ||||
453 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], | ||||
454 | lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC)amdgpu_device_wreg(adev, (0x3489 + sdma_offsets[i]), (((u32)(ring->rptr_gpu_addr)) & 0xFFFFFFFC), 0); | ||||
455 | |||||
456 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1)(((rb_cntl) & ~0x1000) | (0x1000 & ((1) << 0xc) )); | ||||
457 | |||||
458 | WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8)amdgpu_device_wreg(adev, (0x3481 + sdma_offsets[i]), (ring-> gpu_addr >> 8), 0); | ||||
459 | WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40)amdgpu_device_wreg(adev, (0x3482 + sdma_offsets[i]), (ring-> gpu_addr >> 40), 0); | ||||
460 | |||||
461 | ring->wptr = 0; | ||||
462 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2)amdgpu_device_wreg(adev, (0x3484 + sdma_offsets[i]), (ring-> wptr << 2), 0); | ||||
463 | |||||
464 | /* enable DMA RB */ | ||||
465 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1)(((rb_cntl) & ~0x1) | (0x1 & ((1) << 0x0))); | ||||
466 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl)amdgpu_device_wreg(adev, (0x3480 + sdma_offsets[i]), (rb_cntl ), 0); | ||||
467 | |||||
468 | ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])amdgpu_device_rreg(adev, (0x348a + sdma_offsets[i]), 0); | ||||
469 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1)(((ib_cntl) & ~0x1) | (0x1 & ((1) << 0x0))); | ||||
470 | #ifdef __BIG_ENDIAN | ||||
471 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1)(((ib_cntl) & ~0x10) | (0x10 & ((1) << 0x4))); | ||||
472 | #endif | ||||
473 | /* enable DMA IBs */ | ||||
474 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl)amdgpu_device_wreg(adev, (0x348a + sdma_offsets[i]), (ib_cntl ), 0); | ||||
475 | |||||
476 | ring->sched.ready = true1; | ||||
477 | } | ||||
478 | |||||
479 | sdma_v2_4_enable(adev, true1); | ||||
480 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||||
481 | ring = &adev->sdma.instance[i].ring; | ||||
482 | r = amdgpu_ring_test_helper(ring); | ||||
483 | if (r) | ||||
484 | return r; | ||||
485 | |||||
486 | if (adev->mman.buffer_funcs_ring == ring) | ||||
487 | amdgpu_ttm_set_buffer_funcs_status(adev, true1); | ||||
488 | } | ||||
489 | |||||
490 | return 0; | ||||
491 | } | ||||
492 | |||||
493 | /** | ||||
494 | * sdma_v2_4_rlc_resume - setup and start the async dma engines | ||||
495 | * | ||||
496 | * @adev: amdgpu_device pointer | ||||
497 | * | ||||
498 | * Set up the compute DMA queues and enable them (VI). | ||||
499 | * Returns 0 for success, error for failure. | ||||
500 | */ | ||||
501 | static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev) | ||||
502 | { | ||||
503 | /* XXX todo */ | ||||
504 | return 0; | ||||
505 | } | ||||
506 | |||||
507 | |||||
508 | /** | ||||
509 | * sdma_v2_4_start - setup and start the async dma engines | ||||
510 | * | ||||
511 | * @adev: amdgpu_device pointer | ||||
512 | * | ||||
513 | * Set up the DMA engines and enable them (VI). | ||||
514 | * Returns 0 for success, error for failure. | ||||
515 | */ | ||||
516 | static int sdma_v2_4_start(struct amdgpu_device *adev) | ||||
517 | { | ||||
518 | int r; | ||||
519 | |||||
520 | /* halt the engine before programming */ | ||||
521 | sdma_v2_4_enable(adev, false0); | ||||
522 | |||||
523 | /* start the gfx rings and rlc compute queues */ | ||||
524 | r = sdma_v2_4_gfx_resume(adev); | ||||
525 | if (r) | ||||
526 | return r; | ||||
527 | r = sdma_v2_4_rlc_resume(adev); | ||||
528 | if (r) | ||||
529 | return r; | ||||
530 | |||||
531 | return 0; | ||||
532 | } | ||||
533 | |||||
534 | /** | ||||
535 | * sdma_v2_4_ring_test_ring - simple async dma engine test | ||||
536 | * | ||||
537 | * @ring: amdgpu_ring structure holding ring information | ||||
538 | * | ||||
539 | * Test the DMA engine by using it to write a value | ||||
540 | * to memory (VI). | ||||
541 | * Returns 0 for success, error for failure. | ||||
542 | */ | ||||
543 | static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) | ||||
544 | { | ||||
545 | struct amdgpu_device *adev = ring->adev; | ||||
546 | unsigned i; | ||||
547 | unsigned index; | ||||
548 | int r; | ||||
549 | u32 tmp; | ||||
550 | u64 gpu_addr; | ||||
551 | |||||
552 | r = amdgpu_device_wb_get(adev, &index); | ||||
553 | if (r) | ||||
554 | return r; | ||||
555 | |||||
556 | gpu_addr = adev->wb.gpu_addr + (index * 4); | ||||
557 | tmp = 0xCAFEDEAD; | ||||
558 | adev->wb.wb[index] = cpu_to_le32(tmp)((__uint32_t)(tmp)); | ||||
559 | |||||
560 | r = amdgpu_ring_alloc(ring, 5); | ||||
561 | if (r) | ||||
562 | goto error_free_wb; | ||||
563 | |||||
564 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE)(((2) & 0x000000FF) << 0) | | ||||
565 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)(((0) & 0x000000FF) << 8)); | ||||
566 | amdgpu_ring_write(ring, lower_32_bits(gpu_addr)((u32)(gpu_addr))); | ||||
567 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)((u32)(((gpu_addr) >> 16) >> 16))); | ||||
568 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)(((1) & 0x003FFFFF) << 0)); | ||||
569 | amdgpu_ring_write(ring, 0xDEADBEEF); | ||||
570 | amdgpu_ring_commit(ring); | ||||
571 | |||||
572 | for (i = 0; i < adev->usec_timeout; i++) { | ||||
573 | tmp = le32_to_cpu(adev->wb.wb[index])((__uint32_t)(adev->wb.wb[index])); | ||||
574 | if (tmp == 0xDEADBEEF) | ||||
575 | break; | ||||
576 | udelay(1); | ||||
577 | } | ||||
578 | |||||
579 | if (i >= adev->usec_timeout) | ||||
580 | r = -ETIMEDOUT60; | ||||
581 | |||||
582 | error_free_wb: | ||||
583 | amdgpu_device_wb_free(adev, index); | ||||
584 | return r; | ||||
585 | } | ||||
586 | |||||
587 | /** | ||||
588 | * sdma_v2_4_ring_test_ib - test an IB on the DMA engine | ||||
589 | * | ||||
590 | * @ring: amdgpu_ring structure holding ring information | ||||
591 | * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT | ||||
592 | * | ||||
593 | * Test a simple IB in the DMA ring (VI). | ||||
594 | * Returns 0 on success, error on failure. | ||||
595 | */ | ||||
596 | static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | ||||
597 | { | ||||
598 | struct amdgpu_device *adev = ring->adev; | ||||
599 | struct amdgpu_ib ib; | ||||
600 | struct dma_fence *f = NULL((void *)0); | ||||
601 | unsigned index; | ||||
602 | u32 tmp = 0; | ||||
603 | u64 gpu_addr; | ||||
604 | long r; | ||||
605 | |||||
606 | r = amdgpu_device_wb_get(adev, &index); | ||||
607 | if (r) | ||||
608 | return r; | ||||
609 | |||||
610 | gpu_addr = adev->wb.gpu_addr + (index * 4); | ||||
611 | tmp = 0xCAFEDEAD; | ||||
612 | adev->wb.wb[index] = cpu_to_le32(tmp)((__uint32_t)(tmp)); | ||||
613 | memset(&ib, 0, sizeof(ib))__builtin_memset((&ib), (0), (sizeof(ib))); | ||||
614 | r = amdgpu_ib_get(adev, NULL((void *)0), 256, | ||||
615 | AMDGPU_IB_POOL_DIRECT, &ib); | ||||
616 | if (r) | ||||
617 | goto err0; | ||||
618 | |||||
619 | ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE)(((2) & 0x000000FF) << 0) | | ||||
620 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)(((0) & 0x000000FF) << 8); | ||||
621 | ib.ptr[1] = lower_32_bits(gpu_addr)((u32)(gpu_addr)); | ||||
622 | ib.ptr[2] = upper_32_bits(gpu_addr)((u32)(((gpu_addr) >> 16) >> 16)); | ||||
623 | ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)(((1) & 0x003FFFFF) << 0); | ||||
624 | ib.ptr[4] = 0xDEADBEEF; | ||||
625 | ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP)(((0) & 0x000000FF) << 0); | ||||
626 | ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP)(((0) & 0x000000FF) << 0); | ||||
627 | ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP)(((0) & 0x000000FF) << 0); | ||||
628 | ib.length_dw = 8; | ||||
629 | |||||
630 | r = amdgpu_ib_schedule(ring, 1, &ib, NULL((void *)0), &f); | ||||
631 | if (r) | ||||
632 | goto err1; | ||||
633 | |||||
634 | r = dma_fence_wait_timeout(f, false0, timeout); | ||||
635 | if (r == 0) { | ||||
636 | r = -ETIMEDOUT60; | ||||
637 | goto err1; | ||||
638 | } else if (r < 0) { | ||||
639 | goto err1; | ||||
640 | } | ||||
641 | tmp = le32_to_cpu(adev->wb.wb[index])((__uint32_t)(adev->wb.wb[index])); | ||||
642 | if (tmp == 0xDEADBEEF) | ||||
643 | r = 0; | ||||
644 | else | ||||
645 | r = -EINVAL22; | ||||
646 | |||||
647 | err1: | ||||
648 | amdgpu_ib_free(adev, &ib, NULL((void *)0)); | ||||
649 | dma_fence_put(f); | ||||
650 | err0: | ||||
651 | amdgpu_device_wb_free(adev, index); | ||||
652 | return r; | ||||
653 | } | ||||
654 | |||||
655 | /** | ||||
656 | * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART | ||||
657 | * | ||||
658 | * @ib: indirect buffer to fill with commands | ||||
659 | * @pe: addr of the page entry | ||||
660 | * @src: src addr to copy from | ||||
661 | * @count: number of page entries to update | ||||
662 | * | ||||
663 | * Update PTEs by copying them from the GART using sDMA (CIK). | ||||
664 | */ | ||||
665 | static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, | ||||
666 | uint64_t pe, uint64_t src, | ||||
667 | unsigned count) | ||||
668 | { | ||||
669 | unsigned bytes = count * 8; | ||||
670 | |||||
671 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY)(((1) & 0x000000FF) << 0) | | ||||
672 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR)(((0) & 0x000000FF) << 8); | ||||
673 | ib->ptr[ib->length_dw++] = bytes; | ||||
674 | ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ | ||||
675 | ib->ptr[ib->length_dw++] = lower_32_bits(src)((u32)(src)); | ||||
676 | ib->ptr[ib->length_dw++] = upper_32_bits(src)((u32)(((src) >> 16) >> 16)); | ||||
677 | ib->ptr[ib->length_dw++] = lower_32_bits(pe)((u32)(pe)); | ||||
678 | ib->ptr[ib->length_dw++] = upper_32_bits(pe)((u32)(((pe) >> 16) >> 16)); | ||||
679 | } | ||||
680 | |||||
681 | /** | ||||
682 | * sdma_v2_4_vm_write_pte - update PTEs by writing them manually | ||||
683 | * | ||||
684 | * @ib: indirect buffer to fill with commands | ||||
685 | * @pe: addr of the page entry | ||||
686 | * @value: dst addr to write into pe | ||||
687 | * @count: number of page entries to update | ||||
688 | * @incr: increase next addr by incr bytes | ||||
689 | * | ||||
690 | * Update PTEs by writing them manually using sDMA (CIK). | ||||
691 | */ | ||||
692 | static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, | ||||
693 | uint64_t value, unsigned count, | ||||
694 | uint32_t incr) | ||||
695 | { | ||||
696 | unsigned ndw = count * 2; | ||||
697 | |||||
698 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE)(((2) & 0x000000FF) << 0) | | ||||
699 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)(((0) & 0x000000FF) << 8); | ||||
700 | ib->ptr[ib->length_dw++] = pe; | ||||
701 | ib->ptr[ib->length_dw++] = upper_32_bits(pe)((u32)(((pe) >> 16) >> 16)); | ||||
702 | ib->ptr[ib->length_dw++] = ndw; | ||||
703 | for (; ndw > 0; ndw -= 2) { | ||||
704 | ib->ptr[ib->length_dw++] = lower_32_bits(value)((u32)(value)); | ||||
705 | ib->ptr[ib->length_dw++] = upper_32_bits(value)((u32)(((value) >> 16) >> 16)); | ||||
706 | value += incr; | ||||
707 | } | ||||
708 | } | ||||
709 | |||||
710 | /** | ||||
711 | * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA | ||||
712 | * | ||||
713 | * @ib: indirect buffer to fill with commands | ||||
714 | * @pe: addr of the page entry | ||||
715 | * @addr: dst addr to write into pe | ||||
716 | * @count: number of page entries to update | ||||
717 | * @incr: increase next addr by incr bytes | ||||
718 | * @flags: access flags | ||||
719 | * | ||||
720 | * Update the page tables using sDMA (CIK). | ||||
721 | */ | ||||
722 | static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, | ||||
723 | uint64_t addr, unsigned count, | ||||
724 | uint32_t incr, uint64_t flags) | ||||
725 | { | ||||
726 | /* for physically contiguous pages (vram) */ | ||||
727 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE)(((12) & 0x000000FF) << 0); | ||||
728 | ib->ptr[ib->length_dw++] = lower_32_bits(pe)((u32)(pe)); /* dst addr */ | ||||
729 | ib->ptr[ib->length_dw++] = upper_32_bits(pe)((u32)(((pe) >> 16) >> 16)); | ||||
730 | ib->ptr[ib->length_dw++] = lower_32_bits(flags)((u32)(flags)); /* mask */ | ||||
731 | ib->ptr[ib->length_dw++] = upper_32_bits(flags)((u32)(((flags) >> 16) >> 16)); | ||||
732 | ib->ptr[ib->length_dw++] = lower_32_bits(addr)((u32)(addr)); /* value */ | ||||
733 | ib->ptr[ib->length_dw++] = upper_32_bits(addr)((u32)(((addr) >> 16) >> 16)); | ||||
734 | ib->ptr[ib->length_dw++] = incr; /* increment size */ | ||||
735 | ib->ptr[ib->length_dw++] = 0; | ||||
736 | ib->ptr[ib->length_dw++] = count; /* number of entries */ | ||||
737 | } | ||||
738 | |||||
739 | /** | ||||
740 | * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw | ||||
741 | * | ||||
742 | * @ring: amdgpu_ring structure holding ring information | ||||
743 | * @ib: indirect buffer to fill with padding | ||||
744 | * | ||||
745 | */ | ||||
746 | static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) | ||||
747 | { | ||||
748 | struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); | ||||
749 | u32 pad_count; | ||||
750 | int i; | ||||
751 | |||||
752 | pad_count = (-ib->length_dw) & 7; | ||||
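 | /* (-length_dw) & 7 is the dword count needed to round the IB up to the next multiple of 8. */ | ||||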
753 | for (i = 0; i < pad_count; i++) | ||||
754 | if (sdma && sdma->burst_nop && (i == 0)) | ||||
755 | ib->ptr[ib->length_dw++] = | ||||
756 | SDMA_PKT_HEADER_OP(SDMA_OP_NOP)(((0) & 0x000000FF) << 0) | | ||||
757 | SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1)(((pad_count - 1) & 0x00003FFF) << 16); | ||||
758 | else | ||||
759 | ib->ptr[ib->length_dw++] = | ||||
760 | SDMA_PKT_HEADER_OP(SDMA_OP_NOP)(((0) & 0x000000FF) << 0); | ||||
761 | } | ||||
762 | |||||
763 | /** | ||||
764 | * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline | ||||
765 | * | ||||
766 | * @ring: amdgpu_ring pointer | ||||
767 | * | ||||
768 | * Make sure all previous operations are completed (CIK). | ||||
769 | */ | ||||
770 | static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | ||||
771 | { | ||||
772 | uint32_t seq = ring->fence_drv.sync_seq; | ||||
773 | uint64_t addr = ring->fence_drv.gpu_addr; | ||||
774 | |||||
775 | /* wait for idle */ | ||||
776 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM)(((8) & 0x000000FF) << 0) | | ||||
777 | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0)(((0) & 0x00000001) << 26) | | ||||
778 | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)(((3) & 0x00000007) << 28) | /* equal */ | ||||
779 | SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1)(((1) & 0x00000001) << 31)); | ||||
780 | amdgpu_ring_write(ring, addr & 0xfffffffc); | ||||
781 | amdgpu_ring_write(ring, upper_32_bits(addr)((u32)(((addr) >> 16) >> 16)) & 0xffffffff); | ||||
782 | amdgpu_ring_write(ring, seq); /* reference */ | ||||
783 | amdgpu_ring_write(ring, 0xffffffff); /* mask */ | ||||
784 | amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff)(((0xfff) & 0x00000FFF) << 16) | | ||||
785 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)(((4) & 0x0000FFFF) << 0)); /* retry count, poll interval */ | ||||
786 | } | ||||
787 | |||||
788 | /** | ||||
789 | * sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA | ||||
790 | * | ||||
791 | * @ring: amdgpu_ring pointer | ||||
792 | * @vmid: vmid number to use | ||||
793 | * @pd_addr: address | ||||
794 | * | ||||
795 | * Update the page table base and flush the VM TLB | ||||
796 | * using sDMA (VI). | ||||
797 | */ | ||||
798 | static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, | ||||
799 | unsigned vmid, uint64_t pd_addr) | ||||
800 | { | ||||
801 | amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr)(ring)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((ring ), (vmid), (pd_addr)); | ||||
802 | |||||
803 | /* wait for flush */ | ||||
804 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM)(((8) & 0x000000FF) << 0) | | ||||
805 | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0)(((0) & 0x00000001) << 26) | | ||||
806 | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)(((0) & 0x00000007) << 28)); /* always */ | ||||
807 | amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST0x51e << 2); | ||||
808 | amdgpu_ring_write(ring, 0); | ||||
809 | amdgpu_ring_write(ring, 0); /* reference */ | ||||
810 | amdgpu_ring_write(ring, 0); /* mask */ | ||||
811 | amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff)(((0xfff) & 0x00000FFF) << 16) | | ||||
812 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)(((10) & 0x0000FFFF) << 0)); /* retry count, poll interval */ | ||||
813 | } | ||||
814 | |||||
815 | static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring, | ||||
816 | uint32_t reg, uint32_t val) | ||||
817 | { | ||||
818 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE)(((14) & 0x000000FF) << 0) | | ||||
819 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)(((0xf) & 0x0000000F) << 28)); | ||||
820 | amdgpu_ring_write(ring, reg); | ||||
821 | amdgpu_ring_write(ring, val); | ||||
822 | } | ||||
823 | |||||
824 | static int sdma_v2_4_early_init(void *handle) | ||||
825 | { | ||||
826 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||||
827 | |||||
828 | adev->sdma.num_instances = SDMA_MAX_INSTANCE2; | ||||
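 | /* pinning num_instances to SDMA_MAX_INSTANCE is what bounds the sdma_offsets[i] accesses reported in the header. */ | ||||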
829 | |||||
830 | sdma_v2_4_set_ring_funcs(adev); | ||||
831 | sdma_v2_4_set_buffer_funcs(adev); | ||||
832 | sdma_v2_4_set_vm_pte_funcs(adev); | ||||
833 | sdma_v2_4_set_irq_funcs(adev); | ||||
834 | |||||
835 | return 0; | ||||
836 | } | ||||
837 | |||||
838 | static int sdma_v2_4_sw_init(void *handle) | ||||
839 | { | ||||
840 | struct amdgpu_ring *ring; | ||||
841 | int r, i; | ||||
842 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||||
843 | |||||
844 | /* SDMA trap event */ | ||||
845 | r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY0, VISLANDS30_IV_SRCID_SDMA_TRAP0x000000e0, | ||||
846 | &adev->sdma.trap_irq); | ||||
847 | if (r) | ||||
848 | return r; | ||||
849 | |||||
850 | /* SDMA Privileged inst */ | ||||
851 | r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY0, 241, | ||||
852 | &adev->sdma.illegal_inst_irq); | ||||
853 | if (r) | ||||
854 | return r; | ||||
855 | |||||
856 | /* SDMA SRBM write */ | ||||
857 | r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY0, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE0x000000f7, | ||||
858 | &adev->sdma.illegal_inst_irq); | ||||
859 | if (r) | ||||
860 | return r; | ||||
861 | |||||
862 | r = sdma_v2_4_init_microcode(adev); | ||||
863 | if (r) { | ||||
864 | DRM_ERROR("Failed to load sdma firmware!\n")__drm_err("Failed to load sdma firmware!\n"); | ||||
865 | return r; | ||||
866 | } | ||||
867 | |||||
868 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||||
869 | ring = &adev->sdma.instance[i].ring; | ||||
870 | ring->ring_obj = NULL((void *)0); | ||||
871 | ring->use_doorbell = false0; | ||||
872 | snprintf(ring->name, sizeof(ring->name), "sdma%d", i); | ||||
873 | r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, | ||||
874 | (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 : | ||||
875 | AMDGPU_SDMA_IRQ_INSTANCE1, | ||||
876 | AMDGPU_RING_PRIO_DEFAULT, NULL((void *)0)); | ||||
877 | if (r) | ||||
878 | return r; | ||||
879 | } | ||||
880 | |||||
881 | return r; | ||||
882 | } | ||||
883 | |||||
884 | static int sdma_v2_4_sw_fini(void *handle) | ||||
885 | { | ||||
886 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||||
887 | int i; | ||||
888 | |||||
889 | for (i = 0; i < adev->sdma.num_instances; i++) | ||||
890 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | ||||
891 | |||||
892 | sdma_v2_4_free_microcode(adev); | ||||
893 | return 0; | ||||
894 | } | ||||
895 | |||||
896 | static int sdma_v2_4_hw_init(void *handle) | ||||
897 | { | ||||
898 | int r; | ||||
899 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||||
900 | |||||
901 | sdma_v2_4_init_golden_registers(adev); | ||||
902 | |||||
903 | r = sdma_v2_4_start(adev); | ||||
904 | if (r) | ||||
905 | return r; | ||||
906 | |||||
907 | return r; | ||||
908 | } | ||||
909 | |||||
910 | static int sdma_v2_4_hw_fini(void *handle) | ||||
911 | { | ||||
912 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||||
913 | |||||
914 | sdma_v2_4_enable(adev, false0); | ||||
915 | |||||
916 | return 0; | ||||
917 | } | ||||
918 | |||||
919 | static int sdma_v2_4_suspend(void *handle) | ||||
920 | { | ||||
921 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||||
922 | |||||
923 | return sdma_v2_4_hw_fini(adev); | ||||
924 | } | ||||
925 | |||||
926 | static int sdma_v2_4_resume(void *handle) | ||||
927 | { | ||||
928 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||||
929 | |||||
930 | return sdma_v2_4_hw_init(adev); | ||||
931 | } | ||||
932 | |||||
933 | static bool_Bool sdma_v2_4_is_idle(void *handle) | ||||
934 | { | ||||
935 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||||
936 | u32 tmp = RREG32(mmSRBM_STATUS2)amdgpu_device_rreg(adev, (0x393), 0); | ||||
937 | |||||
938 | if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK0x20 | | ||||
939 | SRBM_STATUS2__SDMA1_BUSY_MASK0x40)) | ||||
940 | return false0; | ||||
941 | |||||
942 | return true1; | ||||
943 | } | ||||
944 | |||||
945 | static int sdma_v2_4_wait_for_idle(void *handle) | ||||
946 | { | ||||
947 | unsigned i; | ||||
948 | u32 tmp; | ||||
949 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||||
950 | |||||
951 | for (i = 0; i < adev->usec_timeout; i++) { | ||||
952 | tmp = RREG32(mmSRBM_STATUS2)amdgpu_device_rreg(adev, (0x393), 0) & (SRBM_STATUS2__SDMA_BUSY_MASK0x20 | | ||||
953 | SRBM_STATUS2__SDMA1_BUSY_MASK0x40); | ||||
954 | |||||
955 | if (!tmp) | ||||
956 | return 0; | ||||
957 | udelay(1); | ||||
958 | } | ||||
959 | return -ETIMEDOUT60; | ||||
960 | } | ||||
961 | |||||
962 | static int sdma_v2_4_soft_reset(void *handle) | ||||
963 | { | ||||
964 | u32 srbm_soft_reset = 0; | ||||
965 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||||
966 | u32 tmp = RREG32(mmSRBM_STATUS2)amdgpu_device_rreg(adev, (0x393), 0); | ||||
967 | |||||
968 | if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK0x20) { | ||||
969 | /* sdma0 */ | ||||
970 | tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET)amdgpu_device_rreg(adev, (0x3412 + 0x0), 0); | ||||
971 | tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0)(((tmp) & ~0x1) | (0x1 & ((0) << 0x0))); | ||||
972 | WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp)amdgpu_device_wreg(adev, (0x3412 + 0x0), (tmp), 0); | ||||
973 | srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK0x100000; | ||||
974 | } | ||||
975 | if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK0x40) { | ||||
976 | /* sdma1 */ | ||||
977 | tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET)amdgpu_device_rreg(adev, (0x3412 + 0x200), 0); | ||||
978 | tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0)(((tmp) & ~0x1) | (0x1 & ((0) << 0x0))); | ||||
979 | WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp)amdgpu_device_wreg(adev, (0x3412 + 0x200), (tmp), 0); | ||||
980 | srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK0x40; | ||||
981 | } | ||||
982 | |||||
983 | if (srbm_soft_reset) { | ||||
984 | tmp = RREG32(mmSRBM_SOFT_RESET)amdgpu_device_rreg(adev, (0x398), 0); | ||||
985 | tmp |= srbm_soft_reset; | ||||
986 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp)do { } while(0); | ||||
987 | WREG32(mmSRBM_SOFT_RESET, tmp)amdgpu_device_wreg(adev, (0x398), (tmp), 0); | ||||
988 | tmp = RREG32(mmSRBM_SOFT_RESET)amdgpu_device_rreg(adev, (0x398), 0); | ||||
989 | |||||
990 | udelay(50); | ||||
991 | |||||
992 | tmp &= ~srbm_soft_reset; | ||||
993 | WREG32(mmSRBM_SOFT_RESET, tmp)amdgpu_device_wreg(adev, (0x398), (tmp), 0); | ||||
994 | tmp = RREG32(mmSRBM_SOFT_RESET)amdgpu_device_rreg(adev, (0x398), 0); | ||||
995 | |||||
996 | /* Wait a little for things to settle down */ | ||||
997 | udelay(50); | ||||
998 | } | ||||
999 | |||||
1000 | return 0; | ||||
1001 | } | ||||
1002 | |||||
1003 | static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev, | ||||
1004 | struct amdgpu_irq_src *src, | ||||
1005 | unsigned type, | ||||
1006 | enum amdgpu_interrupt_state state) | ||||
1007 | { | ||||
1008 | u32 sdma_cntl; | ||||
1009 | |||||
1010 | switch (type) { | ||||
1011 | case AMDGPU_SDMA_IRQ_INSTANCE0: | ||||
1012 | switch (state) { | ||||
1013 | case AMDGPU_IRQ_STATE_DISABLE: | ||||
1014 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET)amdgpu_device_rreg(adev, (0x3404 + 0x0), 0); | ||||
1015 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0)(((sdma_cntl) & ~0x1) | (0x1 & ((0) << 0x0))); | ||||
1016 | WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl)amdgpu_device_wreg(adev, (0x3404 + 0x0), (sdma_cntl), 0); | ||||
1017 | break; | ||||
1018 | case AMDGPU_IRQ_STATE_ENABLE: | ||||
1019 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET)amdgpu_device_rreg(adev, (0x3404 + 0x0), 0); | ||||
1020 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1)(((sdma_cntl) & ~0x1) | (0x1 & ((1) << 0x0))); | ||||
1021 | WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl)amdgpu_device_wreg(adev, (0x3404 + 0x0), (sdma_cntl), 0); | ||||
1022 | break; | ||||
1023 | default: | ||||
1024 | break; | ||||
1025 | } | ||||
1026 | break; | ||||
1027 | case AMDGPU_SDMA_IRQ_INSTANCE1: | ||||
1028 | switch (state) { | ||||
1029 | case AMDGPU_IRQ_STATE_DISABLE: | ||||
1030 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET)amdgpu_device_rreg(adev, (0x3404 + 0x200), 0); | ||||
1031 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0)(((sdma_cntl) & ~0x1) | (0x1 & ((0) << 0x0))); | ||||
1032 | WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl)amdgpu_device_wreg(adev, (0x3404 + 0x200), (sdma_cntl), 0); | ||||
1033 | break; | ||||
1034 | case AMDGPU_IRQ_STATE_ENABLE: | ||||
1035 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET)amdgpu_device_rreg(adev, (0x3404 + 0x200), 0); | ||||
1036 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1)(((sdma_cntl) & ~0x1) | (0x1 & ((1) << 0x0))); | ||||
1037 | WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl)amdgpu_device_wreg(adev, (0x3404 + 0x200), (sdma_cntl), 0); | ||||
1038 | break; | ||||
1039 | default: | ||||
1040 | break; | ||||
1041 | } | ||||
1042 | break; | ||||
1043 | default: | ||||
1044 | break; | ||||
1045 | } | ||||
1046 | return 0; | ||||
1047 | } | ||||
1048 | |||||
1049 | static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev, | ||||
1050 | struct amdgpu_irq_src *source, | ||||
1051 | struct amdgpu_iv_entry *entry) | ||||
1052 | { | ||||
1053 | u8 instance_id, queue_id; | ||||
1054 | |||||
1055 | instance_id = (entry->ring_id & 0x3) >> 0; | ||||
1056 | queue_id = (entry->ring_id & 0xc) >> 2; | ||||
1057 | DRM_DEBUG("IH: SDMA trap\n")___drm_dbg(((void *)0), DRM_UT_CORE, "IH: SDMA trap\n"); | ||||
1058 | switch (instance_id) { | ||||
1059 | case 0: | ||||
1060 | switch (queue_id) { | ||||
1061 | case 0: | ||||
1062 | amdgpu_fence_process(&adev->sdma.instance[0].ring); | ||||
1063 | break; | ||||
1064 | case 1: | ||||
1065 | /* XXX compute */ | ||||
1066 | break; | ||||
1067 | case 2: | ||||
1068 | /* XXX compute */ | ||||
1069 | break; | ||||
1070 | } | ||||
1071 | break; | ||||
1072 | case 1: | ||||
1073 | switch (queue_id) { | ||||
1074 | case 0: | ||||
1075 | amdgpu_fence_process(&adev->sdma.instance[1].ring); | ||||
1076 | break; | ||||
1077 | case 1: | ||||
1078 | /* XXX compute */ | ||||
1079 | break; | ||||
1080 | case 2: | ||||
1081 | /* XXX compute */ | ||||
1082 | break; | ||||
1083 | } | ||||
1084 | break; | ||||
1085 | } | ||||
1086 | return 0; | ||||
1087 | } | ||||
1088 | |||||
1089 | static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev, | ||||
1090 | struct amdgpu_irq_src *source, | ||||
1091 | struct amdgpu_iv_entry *entry) | ||||
1092 | { | ||||
1093 | u8 instance_id, queue_id; | ||||
1094 | |||||
1095 | DRM_ERROR("Illegal instruction in SDMA command stream\n")__drm_err("Illegal instruction in SDMA command stream\n"); | ||||
1096 | instance_id = (entry->ring_id & 0x3) >> 0; | ||||
1097 | queue_id = (entry->ring_id & 0xc) >> 2; | ||||
1098 | |||||
1099 | if (instance_id <= 1 && queue_id == 0) | ||||
1100 | drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched); | ||||
1101 | return 0; | ||||
1102 | } | ||||
1103 | |||||
1104 | static int sdma_v2_4_set_clockgating_state(void *handle, | ||||
1105 | enum amd_clockgating_state state) | ||||
1106 | { | ||||
1107 | /* XXX handled via the smc on VI */ | ||||
1108 | return 0; | ||||
1109 | } | ||||
1110 | |||||
1111 | static int sdma_v2_4_set_powergating_state(void *handle, | ||||
1112 | enum amd_powergating_state state) | ||||
1113 | { | ||||
1114 | return 0; | ||||
1115 | } | ||||
1116 | |||||
1117 | static const struct amd_ip_funcs sdma_v2_4_ip_funcs = { | ||||
1118 | .name = "sdma_v2_4", | ||||
1119 | .early_init = sdma_v2_4_early_init, | ||||
1120 | .late_init = NULL((void *)0), | ||||
1121 | .sw_init = sdma_v2_4_sw_init, | ||||
1122 | .sw_fini = sdma_v2_4_sw_fini, | ||||
1123 | .hw_init = sdma_v2_4_hw_init, | ||||
1124 | .hw_fini = sdma_v2_4_hw_fini, | ||||
1125 | .suspend = sdma_v2_4_suspend, | ||||
1126 | .resume = sdma_v2_4_resume, | ||||
1127 | .is_idle = sdma_v2_4_is_idle, | ||||
1128 | .wait_for_idle = sdma_v2_4_wait_for_idle, | ||||
1129 | .soft_reset = sdma_v2_4_soft_reset, | ||||
1130 | .set_clockgating_state = sdma_v2_4_set_clockgating_state, | ||||
1131 | .set_powergating_state = sdma_v2_4_set_powergating_state, | ||||
1132 | }; | ||||
1133 | |||||
1134 | static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { | ||||
1135 | .type = AMDGPU_RING_TYPE_SDMA, | ||||
1136 | .align_mask = 0xf, | ||||
1137 | .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP)(((0) & 0x000000FF) << 0), | ||||
1138 | .support_64bit_ptrs = false0, | ||||
1139 | .secure_submission_supported = true1, | ||||
1140 | .get_rptr = sdma_v2_4_ring_get_rptr, | ||||
1141 | .get_wptr = sdma_v2_4_ring_get_wptr, | ||||
1142 | .set_wptr = sdma_v2_4_ring_set_wptr, | ||||
1143 | .emit_frame_size = | ||||
1144 | 6 + /* sdma_v2_4_ring_emit_hdp_flush */ | ||||
1145 | 3 + /* hdp invalidate */ | ||||
1146 | 6 + /* sdma_v2_4_ring_emit_pipeline_sync */ | ||||
1147 | VI_FLUSH_GPU_TLB_NUM_WREG3 * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */ | ||||
1148 | 10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */ | ||||
1149 | .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */ | ||||
1150 | .emit_ib = sdma_v2_4_ring_emit_ib, | ||||
1151 | .emit_fence = sdma_v2_4_ring_emit_fence, | ||||
1152 | .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync, | ||||
1153 | .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, | ||||
1154 | .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush, | ||||
1155 | .test_ring = sdma_v2_4_ring_test_ring, | ||||
1156 | .test_ib = sdma_v2_4_ring_test_ib, | ||||
1157 | .insert_nop = sdma_v2_4_ring_insert_nop, | ||||
1158 | .pad_ib = sdma_v2_4_ring_pad_ib, | ||||
1159 | .emit_wreg = sdma_v2_4_ring_emit_wreg, | ||||
1160 | }; | ||||
1161 | |||||
1162 | static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) | ||||
1163 | { | ||||
1164 | int i; | ||||
1165 | |||||
1166 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||||
1167 | adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs; | ||||
1168 | adev->sdma.instance[i].ring.me = i; | ||||
1169 | } | ||||
1170 | } | ||||
1171 | |||||
1172 | static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = { | ||||
1173 | .set = sdma_v2_4_set_trap_irq_state, | ||||
1174 | .process = sdma_v2_4_process_trap_irq, | ||||
1175 | }; | ||||
1176 | |||||
1177 | static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = { | ||||
1178 | .process = sdma_v2_4_process_illegal_inst_irq, | ||||
1179 | }; | ||||
1180 | |||||
1181 | static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) | ||||
1182 | { | ||||
1183 | adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; | ||||
1184 | adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs; | ||||
1185 | adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs; | ||||
1186 | } | ||||
1187 | |||||
1188 | /** | ||||
1189 | * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine | ||||
1190 | * | ||||
1191 | * @ib: indirect buffer to copy to | ||||
1192 | * @src_offset: src GPU address | ||||
1193 | * @dst_offset: dst GPU address | ||||
1194 | * @byte_count: number of bytes to xfer | ||||
1195 | * @tmz: unused | ||||
1196 | * | ||||
1197 | * Copy GPU buffers using the DMA engine (VI). | ||||
1198 | * Used by the amdgpu ttm implementation to move pages if | ||||
1199 | * registered as the asic copy callback. | ||||
1200 | */ | ||||
1201 | static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib, | ||||
1202 | uint64_t src_offset, | ||||
1203 | uint64_t dst_offset, | ||||
1204 | uint32_t byte_count, | ||||
1205 | bool_Bool tmz) | ||||
1206 | { | ||||
1207 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY)(((1) & 0x000000FF) << 0) | | ||||
1208 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR)(((0) & 0x000000FF) << 8); | ||||
1209 | ib->ptr[ib->length_dw++] = byte_count; | ||||
1210 | ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ | ||||
1211 | ib->ptr[ib->length_dw++] = lower_32_bits(src_offset)((u32)(src_offset)); | ||||
1212 | ib->ptr[ib->length_dw++] = upper_32_bits(src_offset)((u32)(((src_offset) >> 16) >> 16)); | ||||
1213 | ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset)((u32)(dst_offset)); | ||||
1214 | ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset)((u32)(((dst_offset) >> 16) >> 16)); | ||||
1215 | } | ||||
1216 | |||||
1217 | /** | ||||
1218 | * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine | ||||
1219 | * | ||||
1220 | * @ib: indirect buffer to copy to | ||||
1221 | * @src_data: value to write to buffer | ||||
1222 | * @dst_offset: dst GPU address | ||||
1223 | * @byte_count: number of bytes to xfer | ||||
1224 | * | ||||
1225 | * Fill GPU buffers using the DMA engine (VI). | ||||
1226 | */ | ||||
1227 | static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib, | ||||
1228 | uint32_t src_data, | ||||
1229 | uint64_t dst_offset, | ||||
1230 | uint32_t byte_count) | ||||
1231 | { | ||||
1232 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL)(((11) & 0x000000FF) << 0); | ||||
1233 | ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset)((u32)(dst_offset)); | ||||
1234 | ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset)((u32)(((dst_offset) >> 16) >> 16)); | ||||
1235 | ib->ptr[ib->length_dw++] = src_data; | ||||
1236 | ib->ptr[ib->length_dw++] = byte_count; | ||||
1237 | } | ||||
1238 | |||||
1239 | static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = { | ||||
1240 | .copy_max_bytes = 0x1fffff, | ||||
1241 | .copy_num_dw = 7, | ||||
1242 | .emit_copy_buffer = sdma_v2_4_emit_copy_buffer, | ||||
1243 | |||||
1244 | .fill_max_bytes = 0x1fffff, | ||||
1245 | .fill_num_dw = 7, | ||||
1246 | .emit_fill_buffer = sdma_v2_4_emit_fill_buffer, | ||||
1247 | }; | ||||
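 | /* copy_num_dw matches the 7 dwords sdma_v2_4_emit_copy_buffer() writes; 0x1fffff keeps byte_count inside the packet's 22-bit count field. fill_num_dw (7) exceeds the 5 dwords sdma_v2_4_emit_fill_buffer() emits, leaving slack. */ | ||||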
1248 | |||||
1249 | static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev) | ||||
1250 | { | ||||
1251 | adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs; | ||||
1252 | adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; | ||||
1253 | } | ||||
1254 | |||||
1255 | static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { | ||||
1256 | .copy_pte_num_dw = 7, | ||||
1257 | .copy_pte = sdma_v2_4_vm_copy_pte, | ||||
1258 | |||||
1259 | .write_pte = sdma_v2_4_vm_write_pte, | ||||
1260 | .set_pte_pde = sdma_v2_4_vm_set_pte_pde, | ||||
1261 | }; | ||||
1262 | |||||
1263 | static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) | ||||
1264 | { | ||||
1265 | unsigned i; | ||||
1266 | |||||
1267 | adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; | ||||
1268 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||||
1269 | adev->vm_manager.vm_pte_scheds[i] = | ||||
1270 | &adev->sdma.instance[i].ring.sched; | ||||
1271 | } | ||||
1272 | adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; | ||||
1273 | } | ||||
1274 | |||||
1275 | const struct amdgpu_ip_block_version sdma_v2_4_ip_block = | ||||
1276 | { | ||||
1277 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||||
1278 | .major = 2, | ||||
1279 | .minor = 4, | ||||
1280 | .rev = 0, | ||||
1281 | .funcs = &sdma_v2_4_ip_funcs, | ||||
1282 | }; |