File: | dev/pci/drm/amd/amdgpu/uvd_v6_0.c |
Warning: | line 1215, column 3: Value stored to 'tmp' is never read |
1 | /* |
2 | * Copyright 2014 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: Christian König <christian.koenig@amd.com> |
23 | */ |
24 | |
25 | #include <linux/firmware.h> |
26 | |
27 | #include "amdgpu.h" |
28 | #include "amdgpu_uvd.h" |
29 | #include "vid.h" |
30 | #include "uvd/uvd_6_0_d.h" |
31 | #include "uvd/uvd_6_0_sh_mask.h" |
32 | #include "oss/oss_2_0_d.h" |
33 | #include "oss/oss_2_0_sh_mask.h" |
34 | #include "smu/smu_7_1_3_d.h" |
35 | #include "smu/smu_7_1_3_sh_mask.h" |
36 | #include "bif/bif_5_1_d.h" |
37 | #include "gmc/gmc_8_1_d.h" |
38 | #include "vi.h" |
39 | #include "ivsrcid/ivsrcid_vislands30.h" |
40 | |
41 | /* Polaris10/11/12 firmware version */ |
42 | #define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8)) |
43 | |
44 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev); |
45 | static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev); |
46 | |
47 | static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev); |
48 | static int uvd_v6_0_start(struct amdgpu_device *adev); |
49 | static void uvd_v6_0_stop(struct amdgpu_device *adev); |
50 | static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev); |
51 | static int uvd_v6_0_set_clockgating_state(void *handle, |
52 | enum amd_clockgating_state state); |
53 | static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev, |
54 | bool_Bool enable); |
55 | |
56 | /** |
57 | * uvd_v6_0_enc_support - get encode support status |
58 | * |
59 | * @adev: amdgpu_device pointer |
60 | * |
61 | * Returns the current hardware encode support status |
62 | */ |
63 | static inline bool_Bool uvd_v6_0_enc_support(struct amdgpu_device *adev) |
64 | { |
65 | return ((adev->asic_type >= CHIP_POLARIS10) && |
66 | (adev->asic_type <= CHIP_VEGAM) && |
67 | (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16((1 << 24) | (130 << 16) | (16 << 8)))); |
68 | } |
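For reference, the FW_1_130_16 threshold checked above packs the firmware version as (major << 24) | (minor << 16) | (revision << 8), so 1.130.16 works out to 0x01821000. A minimal illustrative sketch of that packing follows; uvd_fw_version_pack is hypothetical and not a helper in this driver:

    /* Illustrative only: how the FW_1_130_16 constant above is composed. */
    static inline uint32_t uvd_fw_version_pack(uint32_t maj, uint32_t min, uint32_t rev)
    {
            return (maj << 24) | (min << 16) | (rev << 8);
    }
    /* uvd_fw_version_pack(1, 130, 16) == 0x01821000 == FW_1_130_16 */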
69 | |
70 | /** |
71 | * uvd_v6_0_ring_get_rptr - get read pointer |
72 | * |
73 | * @ring: amdgpu_ring pointer |
74 | * |
75 | * Returns the current hardware read pointer |
76 | */ |
77 | static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring) |
78 | { |
79 | struct amdgpu_device *adev = ring->adev; |
80 | |
81 | return RREG32(mmUVD_RBC_RB_RPTR)amdgpu_device_rreg(adev, (0x3da4), 0); |
82 | } |
83 | |
84 | /** |
85 | * uvd_v6_0_enc_ring_get_rptr - get enc read pointer |
86 | * |
87 | * @ring: amdgpu_ring pointer |
88 | * |
89 | * Returns the current hardware enc read pointer |
90 | */ |
91 | static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring) |
92 | { |
93 | struct amdgpu_device *adev = ring->adev; |
94 | |
95 | if (ring == &adev->uvd.inst->ring_enc[0]) |
96 | return RREG32(mmUVD_RB_RPTR)amdgpu_device_rreg(adev, (0x3c29), 0); |
97 | else |
98 | return RREG32(mmUVD_RB_RPTR2)amdgpu_device_rreg(adev, (0x3c24), 0); |
99 | } |
100 | /** |
101 | * uvd_v6_0_ring_get_wptr - get write pointer |
102 | * |
103 | * @ring: amdgpu_ring pointer |
104 | * |
105 | * Returns the current hardware write pointer |
106 | */ |
107 | static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring) |
108 | { |
109 | struct amdgpu_device *adev = ring->adev; |
110 | |
111 | return RREG32(mmUVD_RBC_RB_WPTR)amdgpu_device_rreg(adev, (0x3da5), 0); |
112 | } |
113 | |
114 | /** |
115 | * uvd_v6_0_enc_ring_get_wptr - get enc write pointer |
116 | * |
117 | * @ring: amdgpu_ring pointer |
118 | * |
119 | * Returns the current hardware enc write pointer |
120 | */ |
121 | static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring) |
122 | { |
123 | struct amdgpu_device *adev = ring->adev; |
124 | |
125 | if (ring == &adev->uvd.inst->ring_enc[0]) |
126 | return RREG32(mmUVD_RB_WPTR)amdgpu_device_rreg(adev, (0x3c2a), 0); |
127 | else |
128 | return RREG32(mmUVD_RB_WPTR2)amdgpu_device_rreg(adev, (0x3c25), 0); |
129 | } |
130 | |
131 | /** |
132 | * uvd_v6_0_ring_set_wptr - set write pointer |
133 | * |
134 | * @ring: amdgpu_ring pointer |
135 | * |
136 | * Commits the write pointer to the hardware |
137 | */ |
138 | static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring) |
139 | { |
140 | struct amdgpu_device *adev = ring->adev; |
141 | |
142 | WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr))amdgpu_device_wreg(adev, (0x3da5), (((u32)(ring->wptr))), 0 ); |
143 | } |
144 | |
145 | /** |
146 | * uvd_v6_0_enc_ring_set_wptr - set enc write pointer |
147 | * |
148 | * @ring: amdgpu_ring pointer |
149 | * |
150 | * Commits the enc write pointer to the hardware |
151 | */ |
152 | static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring) |
153 | { |
154 | struct amdgpu_device *adev = ring->adev; |
155 | |
156 | if (ring == &adev->uvd.inst->ring_enc[0]) |
157 | WREG32(mmUVD_RB_WPTR,amdgpu_device_wreg(adev, (0x3c2a), (((u32)(ring->wptr))), 0 ) |
158 | lower_32_bits(ring->wptr))amdgpu_device_wreg(adev, (0x3c2a), (((u32)(ring->wptr))), 0 ); |
159 | else |
160 | WREG32(mmUVD_RB_WPTR2,amdgpu_device_wreg(adev, (0x3c25), (((u32)(ring->wptr))), 0 ) |
161 | lower_32_bits(ring->wptr))amdgpu_device_wreg(adev, (0x3c25), (((u32)(ring->wptr))), 0 ); |
162 | } |
163 | |
164 | /** |
165 | * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working |
166 | * |
167 | * @ring: the engine to test on |
168 | * |
169 | */ |
170 | static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) |
171 | { |
172 | struct amdgpu_device *adev = ring->adev; |
173 | uint32_t rptr; |
174 | unsigned i; |
175 | int r; |
176 | |
177 | r = amdgpu_ring_alloc(ring, 16); |
178 | if (r) |
179 | return r; |
180 | |
181 | rptr = amdgpu_ring_get_rptr(ring)(ring)->funcs->get_rptr((ring)); |
182 | |
183 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END0x00000001); |
184 | amdgpu_ring_commit(ring); |
185 | |
186 | for (i = 0; i < adev->usec_timeout; i++) { |
187 | if (amdgpu_ring_get_rptr(ring)(ring)->funcs->get_rptr((ring)) != rptr) |
188 | break; |
189 | udelay(1); |
190 | } |
191 | |
192 | if (i >= adev->usec_timeout) |
193 | r = -ETIMEDOUT60; |
194 | |
195 | return r; |
196 | } |
197 | |
198 | /** |
199 | * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg |
200 | * |
201 | * @ring: ring we should submit the msg to |
202 | * @handle: session handle to use |
203 | * @bo: amdgpu object for which we query the offset |
204 | * @fence: optional fence to return |
205 | * |
206 | * Open up a stream for HW test |
207 | */ |
208 | static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, |
209 | struct amdgpu_bo *bo, |
210 | struct dma_fence **fence) |
211 | { |
212 | const unsigned ib_size_dw = 16; |
213 | struct amdgpu_job *job; |
214 | struct amdgpu_ib *ib; |
215 | struct dma_fence *f = NULL((void *)0); |
216 | uint64_t addr; |
217 | int i, r; |
218 | |
219 | r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, |
220 | AMDGPU_IB_POOL_DIRECT, &job); |
221 | if (r) |
222 | return r; |
223 | |
224 | ib = &job->ibs[0]; |
225 | addr = amdgpu_bo_gpu_offset(bo); |
226 | |
227 | ib->length_dw = 0; |
228 | ib->ptr[ib->length_dw++] = 0x00000018; |
229 | ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ |
230 | ib->ptr[ib->length_dw++] = handle; |
231 | ib->ptr[ib->length_dw++] = 0x00010000; |
232 | ib->ptr[ib->length_dw++] = upper_32_bits(addr)((u32)(((addr) >> 16) >> 16)); |
233 | ib->ptr[ib->length_dw++] = addr; |
234 | |
235 | ib->ptr[ib->length_dw++] = 0x00000014; |
236 | ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ |
237 | ib->ptr[ib->length_dw++] = 0x0000001c; |
238 | ib->ptr[ib->length_dw++] = 0x00000001; |
239 | ib->ptr[ib->length_dw++] = 0x00000000; |
240 | |
241 | ib->ptr[ib->length_dw++] = 0x00000008; |
242 | ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */ |
243 | |
244 | for (i = ib->length_dw; i < ib_size_dw; ++i) |
245 | ib->ptr[i] = 0x0; |
246 | |
247 | r = amdgpu_job_submit_direct(job, ring, &f); |
248 | if (r) |
249 | goto err; |
250 | |
251 | if (fence) |
252 | *fence = dma_fence_get(f); |
253 | dma_fence_put(f); |
254 | return 0; |
255 | |
256 | err: |
257 | amdgpu_job_free(job); |
258 | return r; |
259 | } |
260 | |
261 | /** |
262 | * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg |
263 | * |
264 | * @ring: ring we should submit the msg to |
265 | * @handle: session handle to use |
266 | * @bo: amdgpu object for which we query the offset |
267 | * @fence: optional fence to return |
268 | * |
269 | * Close up a stream for HW test or if userspace failed to do so |
270 | */ |
271 | static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, |
272 | uint32_t handle, |
273 | struct amdgpu_bo *bo, |
274 | struct dma_fence **fence) |
275 | { |
276 | const unsigned ib_size_dw = 16; |
277 | struct amdgpu_job *job; |
278 | struct amdgpu_ib *ib; |
279 | struct dma_fence *f = NULL((void *)0); |
280 | uint64_t addr; |
281 | int i, r; |
282 | |
283 | r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, |
284 | AMDGPU_IB_POOL_DIRECT, &job); |
285 | if (r) |
286 | return r; |
287 | |
288 | ib = &job->ibs[0]; |
289 | addr = amdgpu_bo_gpu_offset(bo); |
290 | |
291 | ib->length_dw = 0; |
292 | ib->ptr[ib->length_dw++] = 0x00000018; |
293 | ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ |
294 | ib->ptr[ib->length_dw++] = handle; |
295 | ib->ptr[ib->length_dw++] = 0x00010000; |
296 | ib->ptr[ib->length_dw++] = upper_32_bits(addr)((u32)(((addr) >> 16) >> 16)); |
297 | ib->ptr[ib->length_dw++] = addr; |
298 | |
299 | ib->ptr[ib->length_dw++] = 0x00000014; |
300 | ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ |
301 | ib->ptr[ib->length_dw++] = 0x0000001c; |
302 | ib->ptr[ib->length_dw++] = 0x00000001; |
303 | ib->ptr[ib->length_dw++] = 0x00000000; |
304 | |
305 | ib->ptr[ib->length_dw++] = 0x00000008; |
306 | ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */ |
307 | |
308 | for (i = ib->length_dw; i < ib_size_dw; ++i) |
309 | ib->ptr[i] = 0x0; |
310 | |
311 | r = amdgpu_job_submit_direct(job, ring, &f); |
312 | if (r) |
313 | goto err; |
314 | |
315 | if (fence) |
316 | *fence = dma_fence_get(f); |
317 | dma_fence_put(f); |
318 | return 0; |
319 | |
320 | err: |
321 | amdgpu_job_free(job); |
322 | return r; |
323 | } |
324 | |
325 | /** |
326 | * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working |
327 | * |
328 | * @ring: the engine to test on |
329 | * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT |
330 | * |
331 | */ |
332 | static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) |
333 | { |
334 | struct dma_fence *fence = NULL((void *)0); |
335 | struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; |
336 | long r; |
337 | |
338 | r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL((void *)0)); |
339 | if (r) |
340 | goto error; |
341 | |
342 | r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence); |
343 | if (r) |
344 | goto error; |
345 | |
346 | r = dma_fence_wait_timeout(fence, false0, timeout); |
347 | if (r == 0) |
348 | r = -ETIMEDOUT60; |
349 | else if (r > 0) |
350 | r = 0; |
351 | |
352 | error: |
353 | dma_fence_put(fence); |
354 | return r; |
355 | } |
356 | |
357 | static int uvd_v6_0_early_init(void *handle) |
358 | { |
359 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
360 | adev->uvd.num_uvd_inst = 1; |
361 | |
362 | if (!(adev->flags & AMD_IS_APU) && |
363 | (RREG32_SMC(ixCC_HARVEST_FUSES)adev->smc_rreg(adev, (0xc00c0028)) & CC_HARVEST_FUSES__UVD_DISABLE_MASK0x10)) |
364 | return -ENOENT2; |
365 | |
366 | uvd_v6_0_set_ring_funcs(adev); |
367 | |
368 | if (uvd_v6_0_enc_support(adev)) { |
369 | adev->uvd.num_enc_rings = 2; |
370 | uvd_v6_0_set_enc_ring_funcs(adev); |
371 | } |
372 | |
373 | uvd_v6_0_set_irq_funcs(adev); |
374 | |
375 | return 0; |
376 | } |
377 | |
378 | static int uvd_v6_0_sw_init(void *handle) |
379 | { |
380 | struct amdgpu_ring *ring; |
381 | int i, r; |
382 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
383 | |
384 | /* UVD TRAP */ |
385 | r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY0, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE0x0000007c, &adev->uvd.inst->irq); |
386 | if (r) |
387 | return r; |
388 | |
389 | /* UVD ENC TRAP */ |
390 | if (uvd_v6_0_enc_support(adev)) { |
391 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
392 | r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY0, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP0x00000077, &adev->uvd.inst->irq); |
393 | if (r) |
394 | return r; |
395 | } |
396 | } |
397 | |
398 | r = amdgpu_uvd_sw_init(adev); |
399 | if (r) |
400 | return r; |
401 | |
402 | if (!uvd_v6_0_enc_support(adev)) { |
403 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) |
404 | adev->uvd.inst->ring_enc[i].funcs = NULL((void *)0); |
405 | |
406 | adev->uvd.inst->irq.num_types = 1; |
407 | adev->uvd.num_enc_rings = 0; |
408 | |
409 | DRM_INFO("UVD ENC is disabled\n")printk("\0016" "[" "drm" "] " "UVD ENC is disabled\n"); |
410 | } |
411 | |
412 | ring = &adev->uvd.inst->ring; |
413 | snprintf(ring->name, sizeof(ring->name), "uvd"); |
414 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0, |
415 | AMDGPU_RING_PRIO_DEFAULT, NULL((void *)0)); |
416 | if (r) |
417 | return r; |
418 | |
419 | r = amdgpu_uvd_resume(adev); |
420 | if (r) |
421 | return r; |
422 | |
423 | if (uvd_v6_0_enc_support(adev)) { |
424 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
425 | ring = &adev->uvd.inst->ring_enc[i]; |
426 | snprintf(ring->name, sizeof(ring->name), "uvd_enc%d", i); |
427 | r = amdgpu_ring_init(adev, ring, 512, |
428 | &adev->uvd.inst->irq, 0, |
429 | AMDGPU_RING_PRIO_DEFAULT, NULL((void *)0)); |
430 | if (r) |
431 | return r; |
432 | } |
433 | } |
434 | |
435 | r = amdgpu_uvd_entity_init(adev); |
436 | |
437 | return r; |
438 | } |
439 | |
440 | static int uvd_v6_0_sw_fini(void *handle) |
441 | { |
442 | int i, r; |
443 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
444 | |
445 | r = amdgpu_uvd_suspend(adev); |
446 | if (r) |
447 | return r; |
448 | |
449 | if (uvd_v6_0_enc_support(adev)) { |
450 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) |
451 | amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); |
452 | } |
453 | |
454 | return amdgpu_uvd_sw_fini(adev); |
455 | } |
456 | |
457 | /** |
458 | * uvd_v6_0_hw_init - start and test UVD block |
459 | * |
460 | * @handle: handle used to pass amdgpu_device pointer |
461 | * |
462 | * Initialize the hardware, boot up the VCPU and do some testing |
463 | */ |
464 | static int uvd_v6_0_hw_init(void *handle) |
465 | { |
466 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
467 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
468 | uint32_t tmp; |
469 | int i, r; |
470 | |
471 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000)(adev)->asic_funcs->set_uvd_clocks((adev), (10000), (10000 )); |
472 | uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); |
473 | uvd_v6_0_enable_mgcg(adev, true1); |
474 | |
475 | r = amdgpu_ring_test_helper(ring); |
476 | if (r) |
477 | goto done; |
478 | |
479 | r = amdgpu_ring_alloc(ring, 10); |
480 | if (r) { |
481 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r)__drm_err("amdgpu: ring failed to lock UVD ring (%d).\n", r); |
482 | goto done; |
483 | } |
484 | |
485 | tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0)((0 << 30) | ((0x3db2) & 0xFFFF) | ((0) & 0x3FFF ) << 16); |
486 | amdgpu_ring_write(ring, tmp); |
487 | amdgpu_ring_write(ring, 0xFFFFF); |
488 | |
489 | tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0)((0 << 30) | ((0x3db1) & 0xFFFF) | ((0) & 0x3FFF ) << 16); |
490 | amdgpu_ring_write(ring, tmp); |
491 | amdgpu_ring_write(ring, 0xFFFFF); |
492 | |
493 | tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0)((0 << 30) | ((0x3db3) & 0xFFFF) | ((0) & 0x3FFF ) << 16); |
494 | amdgpu_ring_write(ring, tmp); |
495 | amdgpu_ring_write(ring, 0xFFFFF); |
496 | |
497 | /* Clear timeout status bits */ |
498 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0)((0 << 30) | ((0x3db0) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
499 | amdgpu_ring_write(ring, 0x8); |
500 | |
501 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)((0 << 30) | ((0x3d00) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
502 | amdgpu_ring_write(ring, 3); |
503 | |
504 | amdgpu_ring_commit(ring); |
505 | |
506 | if (uvd_v6_0_enc_support(adev)) { |
507 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
508 | ring = &adev->uvd.inst->ring_enc[i]; |
509 | r = amdgpu_ring_test_helper(ring); |
510 | if (r) |
511 | goto done; |
512 | } |
513 | } |
514 | |
515 | done: |
516 | if (!r) { |
517 | if (uvd_v6_0_enc_support(adev)) |
518 | DRM_INFO("UVD and UVD ENC initialized successfully.\n")printk("\0016" "[" "drm" "] " "UVD and UVD ENC initialized successfully.\n" ); |
519 | else |
520 | DRM_INFO("UVD initialized successfully.\n")printk("\0016" "[" "drm" "] " "UVD initialized successfully.\n" ); |
521 | } |
522 | |
523 | return r; |
524 | } |
525 | |
526 | /** |
527 | * uvd_v6_0_hw_fini - stop the hardware block |
528 | * |
529 | * @handle: handle used to pass amdgpu_device pointer |
530 | * |
531 | * Stop the UVD block, mark ring as not ready any more |
532 | */ |
533 | static int uvd_v6_0_hw_fini(void *handle) |
534 | { |
535 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
536 | |
537 | cancel_delayed_work_sync(&adev->uvd.idle_work); |
538 | |
539 | if (RREG32(mmUVD_STATUS)amdgpu_device_rreg(adev, (0x3daf), 0) != 0) |
540 | uvd_v6_0_stop(adev); |
541 | |
542 | return 0; |
543 | } |
544 | |
545 | static int uvd_v6_0_suspend(void *handle) |
546 | { |
547 | int r; |
548 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
549 | |
550 | /* |
551 | * Proper cleanups before halting the HW engine: |
552 | * - cancel the delayed idle work |
553 | * - enable powergating |
554 | * - enable clockgating |
555 | * - disable dpm |
556 | * |
557 | * TODO: to align with the VCN implementation, move the |
558 | * jobs for clockgating/powergating/dpm setting to |
559 | * ->set_powergating_state(). |
560 | */ |
561 | cancel_delayed_work_sync(&adev->uvd.idle_work); |
562 | |
563 | if (adev->pm.dpm_enabled) { |
564 | amdgpu_dpm_enable_uvd(adev, false0); |
565 | } else { |
566 | amdgpu_asic_set_uvd_clocks(adev, 0, 0)(adev)->asic_funcs->set_uvd_clocks((adev), (0), (0)); |
567 | /* shutdown the UVD block */ |
568 | amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, |
569 | AMD_PG_STATE_GATE); |
570 | amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, |
571 | AMD_CG_STATE_GATE); |
572 | } |
573 | |
574 | r = uvd_v6_0_hw_fini(adev); |
575 | if (r) |
576 | return r; |
577 | |
578 | return amdgpu_uvd_suspend(adev); |
579 | } |
580 | |
581 | static int uvd_v6_0_resume(void *handle) |
582 | { |
583 | int r; |
584 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
585 | |
586 | r = amdgpu_uvd_resume(adev); |
587 | if (r) |
588 | return r; |
589 | |
590 | return uvd_v6_0_hw_init(adev); |
591 | } |
592 | |
593 | /** |
594 | * uvd_v6_0_mc_resume - memory controller programming |
595 | * |
596 | * @adev: amdgpu_device pointer |
597 | * |
598 | * Let the UVD memory controller know its offsets |
599 | */ |
600 | static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) |
601 | { |
602 | uint64_t offset; |
603 | uint32_t size; |
604 | |
605 | /* program memory controller bits 0-27 */ |
606 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,amdgpu_device_wreg(adev, (0x3c5f), (((u32)(adev->uvd.inst-> gpu_addr))), 0) |
607 | lower_32_bits(adev->uvd.inst->gpu_addr))amdgpu_device_wreg(adev, (0x3c5f), (((u32)(adev->uvd.inst-> gpu_addr))), 0); |
608 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,amdgpu_device_wreg(adev, (0x3c5e), (((u32)(((adev->uvd.inst ->gpu_addr) >> 16) >> 16))), 0) |
609 | upper_32_bits(adev->uvd.inst->gpu_addr))amdgpu_device_wreg(adev, (0x3c5e), (((u32)(((adev->uvd.inst ->gpu_addr) >> 16) >> 16))), 0); |
610 | |
611 | offset = AMDGPU_UVD_FIRMWARE_OFFSET256; |
612 | size = AMDGPU_UVD_FIRMWARE_SIZE(adev)((((((__uint32_t)(((const struct common_firmware_header *)(adev )->uvd.fw->data)->ucode_size_bytes)) + 8) + (4096 - 1 )) & ~(4096 - 1)) - 256); |
613 | WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3)amdgpu_device_wreg(adev, (0x3d82), (offset >> 3), 0); |
614 | WREG32(mmUVD_VCPU_CACHE_SIZE0, size)amdgpu_device_wreg(adev, (0x3d83), (size), 0); |
615 | |
616 | offset += size; |
617 | size = AMDGPU_UVD_HEAP_SIZE(256*1024); |
618 | WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3)amdgpu_device_wreg(adev, (0x3d84), (offset >> 3), 0); |
619 | WREG32(mmUVD_VCPU_CACHE_SIZE1, size)amdgpu_device_wreg(adev, (0x3d85), (size), 0); |
620 | |
621 | offset += size; |
622 | size = AMDGPU_UVD_STACK_SIZE(200*1024) + |
623 | (AMDGPU_UVD_SESSION_SIZE(50*1024) * adev->uvd.max_handles); |
624 | WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3)amdgpu_device_wreg(adev, (0x3d86), (offset >> 3), 0); |
625 | WREG32(mmUVD_VCPU_CACHE_SIZE2, size)amdgpu_device_wreg(adev, (0x3d87), (size), 0); |
626 | |
627 | WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config)amdgpu_device_wreg(adev, (0x3bd3), (adev->gfx.config.gb_addr_config ), 0); |
628 | WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config)amdgpu_device_wreg(adev, (0x3bd4), (adev->gfx.config.gb_addr_config ), 0); |
629 | WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config)amdgpu_device_wreg(adev, (0x3bd5), (adev->gfx.config.gb_addr_config ), 0); |
630 | |
631 | WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles)amdgpu_device_wreg(adev, (0x3d38), (adev->uvd.max_handles) , 0); |
632 | } |
633 | |
634 | #if 0 |
635 | static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, |
636 | bool_Bool enable) |
637 | { |
638 | u32 data, data1; |
639 | |
640 | data = RREG32(mmUVD_CGC_GATE)amdgpu_device_rreg(adev, (0x3d2a), 0); |
641 | data1 = RREG32(mmUVD_SUVD_CGC_GATE)amdgpu_device_rreg(adev, (0x3be4), 0); |
642 | if (enable) { |
643 | data |= UVD_CGC_GATE__SYS_MASK0x1 | |
644 | UVD_CGC_GATE__UDEC_MASK0x2 | |
645 | UVD_CGC_GATE__MPEG2_MASK0x4 | |
646 | UVD_CGC_GATE__RBC_MASK0x10 | |
647 | UVD_CGC_GATE__LMI_MC_MASK0x20 | |
648 | UVD_CGC_GATE__IDCT_MASK0x80 | |
649 | UVD_CGC_GATE__MPRD_MASK0x100 | |
650 | UVD_CGC_GATE__MPC_MASK0x200 | |
651 | UVD_CGC_GATE__LBSI_MASK0x400 | |
652 | UVD_CGC_GATE__LRBBM_MASK0x800 | |
653 | UVD_CGC_GATE__UDEC_RE_MASK0x1000 | |
654 | UVD_CGC_GATE__UDEC_CM_MASK0x2000 | |
655 | UVD_CGC_GATE__UDEC_IT_MASK0x4000 | |
656 | UVD_CGC_GATE__UDEC_DB_MASK0x8000 | |
657 | UVD_CGC_GATE__UDEC_MP_MASK0x10000 | |
658 | UVD_CGC_GATE__WCB_MASK0x20000 | |
659 | UVD_CGC_GATE__VCPU_MASK0x40000 | |
660 | UVD_CGC_GATE__SCPU_MASK0x80000; |
661 | data1 |= UVD_SUVD_CGC_GATE__SRE_MASK0x1 | |
662 | UVD_SUVD_CGC_GATE__SIT_MASK0x2 | |
663 | UVD_SUVD_CGC_GATE__SMP_MASK0x4 | |
664 | UVD_SUVD_CGC_GATE__SCM_MASK0x8 | |
665 | UVD_SUVD_CGC_GATE__SDB_MASK0x10 | |
666 | UVD_SUVD_CGC_GATE__SRE_H264_MASK0x20 | |
667 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK0x40 | |
668 | UVD_SUVD_CGC_GATE__SIT_H264_MASK0x80 | |
669 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK0x100 | |
670 | UVD_SUVD_CGC_GATE__SCM_H264_MASK0x200 | |
671 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK0x400 | |
672 | UVD_SUVD_CGC_GATE__SDB_H264_MASK0x800 | |
673 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK0x1000; |
674 | } else { |
675 | data &= ~(UVD_CGC_GATE__SYS_MASK0x1 | |
676 | UVD_CGC_GATE__UDEC_MASK0x2 | |
677 | UVD_CGC_GATE__MPEG2_MASK0x4 | |
678 | UVD_CGC_GATE__RBC_MASK0x10 | |
679 | UVD_CGC_GATE__LMI_MC_MASK0x20 | |
680 | UVD_CGC_GATE__LMI_UMC_MASK0x40 | |
681 | UVD_CGC_GATE__IDCT_MASK0x80 | |
682 | UVD_CGC_GATE__MPRD_MASK0x100 | |
683 | UVD_CGC_GATE__MPC_MASK0x200 | |
684 | UVD_CGC_GATE__LBSI_MASK0x400 | |
685 | UVD_CGC_GATE__LRBBM_MASK0x800 | |
686 | UVD_CGC_GATE__UDEC_RE_MASK0x1000 | |
687 | UVD_CGC_GATE__UDEC_CM_MASK0x2000 | |
688 | UVD_CGC_GATE__UDEC_IT_MASK0x4000 | |
689 | UVD_CGC_GATE__UDEC_DB_MASK0x8000 | |
690 | UVD_CGC_GATE__UDEC_MP_MASK0x10000 | |
691 | UVD_CGC_GATE__WCB_MASK0x20000 | |
692 | UVD_CGC_GATE__VCPU_MASK0x40000 | |
693 | UVD_CGC_GATE__SCPU_MASK0x80000); |
694 | data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK0x1 | |
695 | UVD_SUVD_CGC_GATE__SIT_MASK0x2 | |
696 | UVD_SUVD_CGC_GATE__SMP_MASK0x4 | |
697 | UVD_SUVD_CGC_GATE__SCM_MASK0x8 | |
698 | UVD_SUVD_CGC_GATE__SDB_MASK0x10 | |
699 | UVD_SUVD_CGC_GATE__SRE_H264_MASK0x20 | |
700 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK0x40 | |
701 | UVD_SUVD_CGC_GATE__SIT_H264_MASK0x80 | |
702 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK0x100 | |
703 | UVD_SUVD_CGC_GATE__SCM_H264_MASK0x200 | |
704 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK0x400 | |
705 | UVD_SUVD_CGC_GATE__SDB_H264_MASK0x800 | |
706 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK0x1000); |
707 | } |
708 | WREG32(mmUVD_CGC_GATE, data)amdgpu_device_wreg(adev, (0x3d2a), (data), 0); |
709 | WREG32(mmUVD_SUVD_CGC_GATE, data1)amdgpu_device_wreg(adev, (0x3be4), (data1), 0); |
710 | } |
711 | #endif |
712 | |
713 | /** |
714 | * uvd_v6_0_start - start UVD block |
715 | * |
716 | * @adev: amdgpu_device pointer |
717 | * |
718 | * Setup and start the UVD block |
719 | */ |
720 | static int uvd_v6_0_start(struct amdgpu_device *adev) |
721 | { |
722 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
723 | uint32_t rb_bufsz, tmp; |
724 | uint32_t lmi_swap_cntl; |
725 | uint32_t mp_swap_cntl; |
726 | int i, j, r; |
727 | |
728 | /* disable DPG */ |
729 | WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK)do { uint32_t tmp_ = amdgpu_device_rreg(adev, (0x38c4), 0); tmp_ &= (~0x4); tmp_ |= ((0) & ~(~0x4)); amdgpu_device_wreg (adev, (0x38c4), (tmp_), 0); } while (0); |
730 | |
731 | /* disable byte swapping */ |
732 | lmi_swap_cntl = 0; |
733 | mp_swap_cntl = 0; |
734 | |
735 | uvd_v6_0_mc_resume(adev); |
736 | |
737 | /* disable interrupt */ |
738 | WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0)amdgpu_device_wreg(adev, (0x3d40), ((amdgpu_device_rreg(adev, (0x3d40), 0) & ~0x2) | (0) << 0x1), 0); |
739 | |
740 | /* stall UMC and register bus before resetting VCPU */ |
741 | WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1)amdgpu_device_wreg(adev, (0x3d3d), ((amdgpu_device_rreg(adev, (0x3d3d), 0) & ~0x100) | (1) << 0x8), 0); |
742 | mdelay(1); |
743 | |
744 | /* put LMI, VCPU, RBC etc... into reset */ |
745 | WREG32(mmUVD_SOFT_RESET,amdgpu_device_wreg(adev, (0x3da0), (0x4 | 0x8 | 0x2 | 0x1 | 0x20 | 0x40 | 0x80 | 0x2000), 0) |
746 | UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |amdgpu_device_wreg(adev, (0x3da0), (0x4 | 0x8 | 0x2 | 0x1 | 0x20 | 0x40 | 0x80 | 0x2000), 0) |
747 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |amdgpu_device_wreg(adev, (0x3da0), (0x4 | 0x8 | 0x2 | 0x1 | 0x20 | 0x40 | 0x80 | 0x2000), 0) |
748 | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |amdgpu_device_wreg(adev, (0x3da0), (0x4 | 0x8 | 0x2 | 0x1 | 0x20 | 0x40 | 0x80 | 0x2000), 0) |
749 | UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |amdgpu_device_wreg(adev, (0x3da0), (0x4 | 0x8 | 0x2 | 0x1 | 0x20 | 0x40 | 0x80 | 0x2000), 0) |
750 | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |amdgpu_device_wreg(adev, (0x3da0), (0x4 | 0x8 | 0x2 | 0x1 | 0x20 | 0x40 | 0x80 | 0x2000), 0) |
751 | UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |amdgpu_device_wreg(adev, (0x3da0), (0x4 | 0x8 | 0x2 | 0x1 | 0x20 | 0x40 | 0x80 | 0x2000), 0) |
752 | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |amdgpu_device_wreg(adev, (0x3da0), (0x4 | 0x8 | 0x2 | 0x1 | 0x20 | 0x40 | 0x80 | 0x2000), 0) |
753 | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK)amdgpu_device_wreg(adev, (0x3da0), (0x4 | 0x8 | 0x2 | 0x1 | 0x20 | 0x40 | 0x80 | 0x2000), 0); |
754 | mdelay(5); |
755 | |
756 | /* take UVD block out of reset */ |
757 | WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0)amdgpu_device_wreg(adev, (0x398), ((amdgpu_device_rreg(adev, ( 0x398), 0) & ~0x40000) | (0) << 0x12), 0); |
758 | mdelay(5); |
759 | |
760 | /* initialize UVD memory controller */ |
761 | WREG32(mmUVD_LMI_CTRL,amdgpu_device_wreg(adev, (0x3d66), ((0x40 << 0x0) | 0x100 | 0x2000 | 0x200000 | 0x200 | 0x100000), 0) |
762 | (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |amdgpu_device_wreg(adev, (0x3d66), ((0x40 << 0x0) | 0x100 | 0x2000 | 0x200000 | 0x200 | 0x100000), 0) |
763 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |amdgpu_device_wreg(adev, (0x3d66), ((0x40 << 0x0) | 0x100 | 0x2000 | 0x200000 | 0x200 | 0x100000), 0) |
764 | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |amdgpu_device_wreg(adev, (0x3d66), ((0x40 << 0x0) | 0x100 | 0x2000 | 0x200000 | 0x200 | 0x100000), 0) |
765 | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |amdgpu_device_wreg(adev, (0x3d66), ((0x40 << 0x0) | 0x100 | 0x2000 | 0x200000 | 0x200 | 0x100000), 0) |
766 | UVD_LMI_CTRL__REQ_MODE_MASK |amdgpu_device_wreg(adev, (0x3d66), ((0x40 << 0x0) | 0x100 | 0x2000 | 0x200000 | 0x200 | 0x100000), 0) |
767 | UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK)amdgpu_device_wreg(adev, (0x3d66), ((0x40 << 0x0) | 0x100 | 0x2000 | 0x200000 | 0x200 | 0x100000), 0); |
768 | |
769 | #ifdef __BIG_ENDIAN |
770 | /* swap (8 in 32) RB and IB */ |
771 | lmi_swap_cntl = 0xa; |
772 | mp_swap_cntl = 0; |
773 | #endif |
774 | WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl)amdgpu_device_wreg(adev, (0x3d6d), (lmi_swap_cntl), 0); |
775 | WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl)amdgpu_device_wreg(adev, (0x3d6f), (mp_swap_cntl), 0); |
776 | |
777 | WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040)amdgpu_device_wreg(adev, (0x3d79), (0x40c2040), 0); |
778 | WREG32(mmUVD_MPC_SET_MUXA1, 0x0)amdgpu_device_wreg(adev, (0x3d7a), (0x0), 0); |
779 | WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040)amdgpu_device_wreg(adev, (0x3d7b), (0x40c2040), 0); |
780 | WREG32(mmUVD_MPC_SET_MUXB1, 0x0)amdgpu_device_wreg(adev, (0x3d7c), (0x0), 0); |
781 | WREG32(mmUVD_MPC_SET_ALU, 0)amdgpu_device_wreg(adev, (0x3d7e), (0), 0); |
782 | WREG32(mmUVD_MPC_SET_MUX, 0x88)amdgpu_device_wreg(adev, (0x3d7d), (0x88), 0); |
783 | |
784 | /* take all subblocks out of reset, except VCPU */ |
785 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK)amdgpu_device_wreg(adev, (0x3da0), (0x8), 0); |
786 | mdelay(5); |
787 | |
788 | /* enable VCPU clock */ |
789 | WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK)amdgpu_device_wreg(adev, (0x3d98), (0x200), 0); |
790 | |
791 | /* enable UMC */ |
792 | WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0)amdgpu_device_wreg(adev, (0x3d3d), ((amdgpu_device_rreg(adev, (0x3d3d), 0) & ~0x100) | (0) << 0x8), 0); |
793 | |
794 | /* boot up the VCPU */ |
795 | WREG32(mmUVD_SOFT_RESET, 0)amdgpu_device_wreg(adev, (0x3da0), (0), 0); |
796 | mdelay(10); |
797 | |
798 | for (i = 0; i < 10; ++i) { |
799 | uint32_t status; |
800 | |
801 | for (j = 0; j < 100; ++j) { |
802 | status = RREG32(mmUVD_STATUS)amdgpu_device_rreg(adev, (0x3daf), 0); |
803 | if (status & 2) |
804 | break; |
805 | mdelay(10); |
806 | } |
807 | r = 0; |
808 | if (status & 2) |
809 | break; |
810 | |
811 | DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n")__drm_err("UVD not responding, trying to reset the VCPU!!!\n" ); |
812 | WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1)amdgpu_device_wreg(adev, (0x3da0), ((amdgpu_device_rreg(adev, (0x3da0), 0) & ~0x8) | (1) << 0x3), 0); |
813 | mdelay(10); |
814 | WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0)amdgpu_device_wreg(adev, (0x3da0), ((amdgpu_device_rreg(adev, (0x3da0), 0) & ~0x8) | (0) << 0x3), 0); |
815 | mdelay(10); |
816 | r = -1; |
817 | } |
818 | |
819 | if (r) { |
820 | DRM_ERROR("UVD not responding, giving up!!!\n")__drm_err("UVD not responding, giving up!!!\n"); |
821 | return r; |
822 | } |
823 | /* enable master interrupt */ |
824 | WREG32_P(mmUVD_MASTINT_EN,do { uint32_t tmp_ = amdgpu_device_rreg(adev, (0x3d40), 0); tmp_ &= (~(0x2|0x4)); tmp_ |= (((0x2|0x4)) & ~(~(0x2|0x4) )); amdgpu_device_wreg(adev, (0x3d40), (tmp_), 0); } while (0 ) |
825 | (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),do { uint32_t tmp_ = amdgpu_device_rreg(adev, (0x3d40), 0); tmp_ &= (~(0x2|0x4)); tmp_ |= (((0x2|0x4)) & ~(~(0x2|0x4) )); amdgpu_device_wreg(adev, (0x3d40), (tmp_), 0); } while (0 ) |
826 | ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK))do { uint32_t tmp_ = amdgpu_device_rreg(adev, (0x3d40), 0); tmp_ &= (~(0x2|0x4)); tmp_ |= (((0x2|0x4)) & ~(~(0x2|0x4) )); amdgpu_device_wreg(adev, (0x3d40), (tmp_), 0); } while (0 ); |
827 | |
828 | /* clear bit 4 of UVD_STATUS */ |
829 | WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT))do { uint32_t tmp_ = amdgpu_device_rreg(adev, (0x3daf), 0); tmp_ &= (~(2 << 0x1)); tmp_ |= ((0) & ~(~(2 << 0x1))); amdgpu_device_wreg(adev, (0x3daf), (tmp_), 0); } while (0); |
830 | |
831 | /* force RBC into idle state */ |
832 | rb_bufsz = order_base_2(ring->ring_size)drm_order(ring->ring_size); |
833 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz)(((0) & ~0x1f) | (0x1f & ((rb_bufsz) << 0x0))); |
834 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1)(((tmp) & ~0x1f00) | (0x1f00 & ((1) << 0x8))); |
835 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1)(((tmp) & ~0x10000) | (0x10000 & ((1) << 0x10)) ); |
836 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0)(((tmp) & ~0x100000) | (0x100000 & ((0) << 0x14 ))); |
837 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1)(((tmp) & ~0x1000000) | (0x1000000 & ((1) << 0x18 ))); |
838 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1)(((tmp) & ~0x10000000) | (0x10000000 & ((1) << 0x1c ))); |
839 | WREG32(mmUVD_RBC_RB_CNTL, tmp)amdgpu_device_wreg(adev, (0x3da9), (tmp), 0); |
840 | |
841 | /* set the write pointer delay */ |
842 | WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0)amdgpu_device_wreg(adev, (0x3da6), (0), 0); |
843 | |
844 | /* set the wb address */ |
845 | WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2))amdgpu_device_wreg(adev, (0x3daa), ((((u32)(((ring->gpu_addr ) >> 16) >> 16)) >> 2)), 0); |
846 | |
847 | /* program the RB_BASE for ring buffer */ |
848 | WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,amdgpu_device_wreg(adev, (0x3c69), (((u32)(ring->gpu_addr) )), 0) |
849 | lower_32_bits(ring->gpu_addr))amdgpu_device_wreg(adev, (0x3c69), (((u32)(ring->gpu_addr) )), 0); |
850 | WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,amdgpu_device_wreg(adev, (0x3c68), (((u32)(((ring->gpu_addr ) >> 16) >> 16))), 0) |
851 | upper_32_bits(ring->gpu_addr))amdgpu_device_wreg(adev, (0x3c68), (((u32)(((ring->gpu_addr ) >> 16) >> 16))), 0); |
852 | |
853 | /* Initialize the ring buffer's read and write pointers */ |
854 | WREG32(mmUVD_RBC_RB_RPTR, 0)amdgpu_device_wreg(adev, (0x3da4), (0), 0); |
855 | |
856 | ring->wptr = RREG32(mmUVD_RBC_RB_RPTR)amdgpu_device_rreg(adev, (0x3da4), 0); |
857 | WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr))amdgpu_device_wreg(adev, (0x3da5), (((u32)(ring->wptr))), 0 ); |
858 | |
859 | WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0)amdgpu_device_wreg(adev, (0x3da9), ((amdgpu_device_rreg(adev, (0x3da9), 0) & ~0x10000) | (0) << 0x10), 0); |
860 | |
861 | if (uvd_v6_0_enc_support(adev)) { |
862 | ring = &adev->uvd.inst->ring_enc[0]; |
863 | WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr))amdgpu_device_wreg(adev, (0x3c29), (((u32)(ring->wptr))), 0 ); |
864 | WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr))amdgpu_device_wreg(adev, (0x3c2a), (((u32)(ring->wptr))), 0 ); |
865 | WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr)amdgpu_device_wreg(adev, (0x3c26), (ring->gpu_addr), 0); |
866 | WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr))amdgpu_device_wreg(adev, (0x3c27), (((u32)(((ring->gpu_addr ) >> 16) >> 16))), 0); |
867 | WREG32(mmUVD_RB_SIZE, ring->ring_size / 4)amdgpu_device_wreg(adev, (0x3c28), (ring->ring_size / 4), 0 ); |
868 | |
869 | ring = &adev->uvd.inst->ring_enc[1]; |
870 | WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr))amdgpu_device_wreg(adev, (0x3c24), (((u32)(ring->wptr))), 0 ); |
871 | WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr))amdgpu_device_wreg(adev, (0x3c25), (((u32)(ring->wptr))), 0 ); |
872 | WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr)amdgpu_device_wreg(adev, (0x3c21), (ring->gpu_addr), 0); |
873 | WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr))amdgpu_device_wreg(adev, (0x3c22), (((u32)(((ring->gpu_addr ) >> 16) >> 16))), 0); |
874 | WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4)amdgpu_device_wreg(adev, (0x3c23), (ring->ring_size / 4), 0 ); |
875 | } |
876 | |
877 | return 0; |
878 | } |
879 | |
880 | /** |
881 | * uvd_v6_0_stop - stop UVD block |
882 | * |
883 | * @adev: amdgpu_device pointer |
884 | * |
885 | * stop the UVD block |
886 | */ |
887 | static void uvd_v6_0_stop(struct amdgpu_device *adev) |
888 | { |
889 | /* force RBC into idle state */ |
890 | WREG32(mmUVD_RBC_RB_CNTL, 0x11010101)amdgpu_device_wreg(adev, (0x3da9), (0x11010101), 0); |
891 | |
892 | /* Stall UMC and register bus before resetting VCPU */ |
893 | WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8))do { uint32_t tmp_ = amdgpu_device_rreg(adev, (0x3d3d), 0); tmp_ &= (~(1 << 8)); tmp_ |= ((1 << 8) & ~(~( 1 << 8))); amdgpu_device_wreg(adev, (0x3d3d), (tmp_), 0 ); } while (0); |
894 | mdelay(1); |
895 | |
896 | /* put VCPU into reset */ |
897 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK)amdgpu_device_wreg(adev, (0x3da0), (0x8), 0); |
898 | mdelay(5); |
899 | |
900 | /* disable VCPU clock */ |
901 | WREG32(mmUVD_VCPU_CNTL, 0x0)amdgpu_device_wreg(adev, (0x3d98), (0x0), 0); |
902 | |
903 | /* Unstall UMC and register bus */ |
904 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8))do { uint32_t tmp_ = amdgpu_device_rreg(adev, (0x3d3d), 0); tmp_ &= (~(1 << 8)); tmp_ |= ((0) & ~(~(1 << 8 ))); amdgpu_device_wreg(adev, (0x3d3d), (tmp_), 0); } while ( 0); |
905 | |
906 | WREG32(mmUVD_STATUS, 0)amdgpu_device_wreg(adev, (0x3daf), (0), 0); |
907 | } |
908 | |
909 | /** |
910 | * uvd_v6_0_ring_emit_fence - emit a fence & trap command |
911 | * |
912 | * @ring: amdgpu_ring pointer |
913 | * @addr: address |
914 | * @seq: sequence number |
915 | * @flags: fence related flags |
916 | * |
917 | * Write a fence and a trap command to the ring. |
918 | */ |
919 | static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, |
920 | unsigned flags) |
921 | { |
922 | WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT)({ int __ret = !!(flags & (1 << 0)); if (__ret) printf ("WARNING %s failed at %s:%d\n", "flags & (1 << 0)" , "/usr/src/sys/dev/pci/drm/amd/amdgpu/uvd_v6_0.c", 922); __builtin_expect (!!(__ret), 0); }); |
923 | |
924 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)((0 << 30) | ((0x3dbd) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
925 | amdgpu_ring_write(ring, seq); |
926 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)((0 << 30) | ((0x3bc4) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
927 | amdgpu_ring_write(ring, addr & 0xffffffff); |
928 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)((0 << 30) | ((0x3bc5) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
929 | amdgpu_ring_write(ring, upper_32_bits(addr)((u32)(((addr) >> 16) >> 16)) & 0xff); |
930 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)((0 << 30) | ((0x3bc3) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
931 | amdgpu_ring_write(ring, 0); |
932 | |
933 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)((0 << 30) | ((0x3bc4) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
934 | amdgpu_ring_write(ring, 0); |
935 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)((0 << 30) | ((0x3bc5) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
936 | amdgpu_ring_write(ring, 0); |
937 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)((0 << 30) | ((0x3bc3) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
938 | amdgpu_ring_write(ring, 2); |
939 | } |
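The PACKET0() expansions visible in this sequence encode a type-0 ring packet: bits 31:30 hold the packet type (0), bits 15:0 the register offset, and bits 29:16 the DWORD count, with the register value written in the DWORD that follows. A hedged sketch of an equivalent helper, for illustration only (the driver uses its own PACKET0 macro):

    /* Sketch matching the PACKET0() expansions shown in this listing. */
    static inline uint32_t uvd_packet0(uint32_t reg, uint32_t count)
    {
            return (0u << 30) | (reg & 0xFFFF) | ((count & 0x3FFF) << 16);
    }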
940 | |
941 | /** |
942 | * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command |
943 | * |
944 | * @ring: amdgpu_ring pointer |
945 | * @addr: address |
946 | * @seq: sequence number |
947 | * @flags: fence related flags |
948 | * |
949 | * Write an enc fence and a trap command to the ring. |
950 | */ |
951 | static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, |
952 | u64 seq, unsigned flags) |
953 | { |
954 | WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT)({ int __ret = !!(flags & (1 << 0)); if (__ret) printf ("WARNING %s failed at %s:%d\n", "flags & (1 << 0)" , "/usr/src/sys/dev/pci/drm/amd/amdgpu/uvd_v6_0.c", 954); __builtin_expect (!!(__ret), 0); }); |
955 | |
956 | amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE0x00000003); |
957 | amdgpu_ring_write(ring, addr); |
958 | amdgpu_ring_write(ring, upper_32_bits(addr)((u32)(((addr) >> 16) >> 16))); |
959 | amdgpu_ring_write(ring, seq); |
960 | amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP0x00000004); |
961 | } |
962 | |
963 | /** |
964 | * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing |
965 | * |
966 | * @ring: amdgpu_ring pointer |
967 | */ |
968 | static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) |
969 | { |
970 | /* The firmware doesn't seem to like touching registers at this point. */ |
971 | } |
972 | |
973 | /** |
974 | * uvd_v6_0_ring_test_ring - register write test |
975 | * |
976 | * @ring: amdgpu_ring pointer |
977 | * |
978 | * Test if we can successfully write to the context register |
979 | */ |
980 | static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) |
981 | { |
982 | struct amdgpu_device *adev = ring->adev; |
983 | uint32_t tmp = 0; |
984 | unsigned i; |
985 | int r; |
986 | |
987 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD)amdgpu_device_wreg(adev, (0x3dbd), (0xCAFEDEAD), 0); |
988 | r = amdgpu_ring_alloc(ring, 3); |
989 | if (r) |
990 | return r; |
991 | |
992 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)((0 << 30) | ((0x3dbd) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
993 | amdgpu_ring_write(ring, 0xDEADBEEF); |
994 | amdgpu_ring_commit(ring); |
995 | for (i = 0; i < adev->usec_timeout; i++) { |
996 | tmp = RREG32(mmUVD_CONTEXT_ID)amdgpu_device_rreg(adev, (0x3dbd), 0); |
997 | if (tmp == 0xDEADBEEF) |
998 | break; |
999 | udelay(1); |
1000 | } |
1001 | |
1002 | if (i >= adev->usec_timeout) |
1003 | r = -ETIMEDOUT60; |
1004 | |
1005 | return r; |
1006 | } |
1007 | |
1008 | /** |
1009 | * uvd_v6_0_ring_emit_ib - execute indirect buffer |
1010 | * |
1011 | * @ring: amdgpu_ring pointer |
1012 | * @job: job to retrieve vmid from |
1013 | * @ib: indirect buffer to execute |
1014 | * @flags: unused |
1015 | * |
1016 | * Write ring commands to execute the indirect buffer |
1017 | */ |
1018 | static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, |
1019 | struct amdgpu_job *job, |
1020 | struct amdgpu_ib *ib, |
1021 | uint32_t flags) |
1022 | { |
1023 | unsigned vmid = AMDGPU_JOB_GET_VMID(job)((job) ? (job)->vmid : 0); |
1024 | |
1025 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)((0 << 30) | ((0x3da1) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1026 | amdgpu_ring_write(ring, vmid); |
1027 | |
1028 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)((0 << 30) | ((0x3c67) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1029 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)((u32)(ib->gpu_addr))); |
1030 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)((0 << 30) | ((0x3c66) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1031 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)((u32)(((ib->gpu_addr) >> 16) >> 16))); |
1032 | amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0)((0 << 30) | ((0x3da2) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1033 | amdgpu_ring_write(ring, ib->length_dw); |
1034 | } |
1035 | |
1036 | /** |
1037 | * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer |
1038 | * |
1039 | * @ring: amdgpu_ring pointer |
1040 | * @job: job to retrieve vmid from |
1041 | * @ib: indirect buffer to execute |
1042 | * @flags: unused |
1043 | * |
1044 | * Write enc ring commands to execute the indirect buffer |
1045 | */ |
1046 | static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, |
1047 | struct amdgpu_job *job, |
1048 | struct amdgpu_ib *ib, |
1049 | uint32_t flags) |
1050 | { |
1051 | unsigned vmid = AMDGPU_JOB_GET_VMID(job)((job) ? (job)->vmid : 0); |
1052 | |
1053 | amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM0x00000102); |
1054 | amdgpu_ring_write(ring, vmid); |
1055 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)((u32)(ib->gpu_addr))); |
1056 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)((u32)(((ib->gpu_addr) >> 16) >> 16))); |
1057 | amdgpu_ring_write(ring, ib->length_dw); |
1058 | } |
1059 | |
1060 | static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, |
1061 | uint32_t reg, uint32_t val) |
1062 | { |
1063 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)((0 << 30) | ((0x3bc4) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1064 | amdgpu_ring_write(ring, reg << 2); |
1065 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)((0 << 30) | ((0x3bc5) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1066 | amdgpu_ring_write(ring, val); |
1067 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)((0 << 30) | ((0x3bc3) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1068 | amdgpu_ring_write(ring, 0x8); |
1069 | } |
1070 | |
1071 | static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, |
1072 | unsigned vmid, uint64_t pd_addr) |
1073 | { |
1074 | amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr)(ring)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((ring ), (vmid), (pd_addr)); |
1075 | |
1076 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)((0 << 30) | ((0x3bc4) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1077 | amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST0x51e << 2); |
1078 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)((0 << 30) | ((0x3bc5) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1079 | amdgpu_ring_write(ring, 0); |
1080 | amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)((0 << 30) | ((0x3c0a) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1081 | amdgpu_ring_write(ring, 1 << vmid); /* mask */ |
1082 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)((0 << 30) | ((0x3bc3) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1083 | amdgpu_ring_write(ring, 0xC); |
1084 | } |
1085 | |
1086 | static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
1087 | { |
1088 | uint32_t seq = ring->fence_drv.sync_seq; |
1089 | uint64_t addr = ring->fence_drv.gpu_addr; |
1090 | |
1091 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)((0 << 30) | ((0x3bc4) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1092 | amdgpu_ring_write(ring, lower_32_bits(addr)((u32)(addr))); |
1093 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)((0 << 30) | ((0x3bc5) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1094 | amdgpu_ring_write(ring, upper_32_bits(addr)((u32)(((addr) >> 16) >> 16))); |
1095 | amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)((0 << 30) | ((0x3c0a) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1096 | amdgpu_ring_write(ring, 0xffffffff); /* mask */ |
1097 | amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0)((0 << 30) | ((0x3c0b) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1098 | amdgpu_ring_write(ring, seq); |
1099 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)((0 << 30) | ((0x3bc3) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1100 | amdgpu_ring_write(ring, 0xE); |
1101 | } |
1102 | |
1103 | static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) |
1104 | { |
1105 | int i; |
1106 | |
1107 | WARN_ON(ring->wptr % 2 || count % 2)({ int __ret = !!(ring->wptr % 2 || count % 2); if (__ret) printf("WARNING %s failed at %s:%d\n", "ring->wptr % 2 || count % 2" , "/usr/src/sys/dev/pci/drm/amd/amdgpu/uvd_v6_0.c", 1107); __builtin_expect (!!(__ret), 0); }); |
1108 | |
1109 | for (i = 0; i < count / 2; i++) { |
1110 | amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0)((0 << 30) | ((0x3bff) & 0xFFFF) | ((0) & 0x3FFF ) << 16)); |
1111 | amdgpu_ring_write(ring, 0); |
1112 | } |
1113 | } |
1114 | |
1115 | static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
1116 | { |
1117 | uint32_t seq = ring->fence_drv.sync_seq; |
1118 | uint64_t addr = ring->fence_drv.gpu_addr; |
1119 | |
1120 | amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE0x00000106); |
1121 | amdgpu_ring_write(ring, lower_32_bits(addr)((u32)(addr))); |
1122 | amdgpu_ring_write(ring, upper_32_bits(addr)((u32)(((addr) >> 16) >> 16))); |
1123 | amdgpu_ring_write(ring, seq); |
1124 | } |
1125 | |
1126 | static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring) |
1127 | { |
1128 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END0x00000001); |
1129 | } |
1130 | |
1131 | static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, |
1132 | unsigned int vmid, uint64_t pd_addr) |
1133 | { |
1134 | amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB0x00000107); |
1135 | amdgpu_ring_write(ring, vmid); |
1136 | amdgpu_ring_write(ring, pd_addr >> 12); |
1137 | |
1138 | amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB0x00000108); |
1139 | amdgpu_ring_write(ring, vmid); |
1140 | } |
1141 | |
1142 | static bool_Bool uvd_v6_0_is_idle(void *handle) |
1143 | { |
1144 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1145 | |
1146 | return !(RREG32(mmSRBM_STATUS)amdgpu_device_rreg(adev, (0x394), 0) & SRBM_STATUS__UVD_BUSY_MASK0x80000); |
1147 | } |
1148 | |
1149 | static int uvd_v6_0_wait_for_idle(void *handle) |
1150 | { |
1151 | unsigned i; |
1152 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1153 | |
1154 | for (i = 0; i < adev->usec_timeout; i++) { |
1155 | if (uvd_v6_0_is_idle(handle)) |
1156 | return 0; |
1157 | } |
1158 | return -ETIMEDOUT60; |
1159 | } |
1160 | |
1161 | #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd |
1162 | static bool_Bool uvd_v6_0_check_soft_reset(void *handle) |
1163 | { |
1164 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1165 | u32 srbm_soft_reset = 0; |
1166 | u32 tmp = RREG32(mmSRBM_STATUS)amdgpu_device_rreg(adev, (0x394), 0); |
1167 | |
1168 | if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING)(((tmp) & 0x2) >> 0x1) || |
1169 | REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY)(((tmp) & 0x80000) >> 0x13) || |
1170 | (RREG32(mmUVD_STATUS)amdgpu_device_rreg(adev, (0x3daf), 0) & AMDGPU_UVD_STATUS_BUSY_MASK0xfd)) |
1171 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1)(((srbm_soft_reset) & ~0x40000) | (0x40000 & ((1) << 0x12))); |
1172 | |
1173 | if (srbm_soft_reset) { |
1174 | adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; |
1175 | return true1; |
1176 | } else { |
1177 | adev->uvd.inst->srbm_soft_reset = 0; |
1178 | return false0; |
1179 | } |
1180 | } |
1181 | |
1182 | static int uvd_v6_0_pre_soft_reset(void *handle) |
1183 | { |
1184 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1185 | |
1186 | if (!adev->uvd.inst->srbm_soft_reset) |
1187 | return 0; |
1188 | |
1189 | uvd_v6_0_stop(adev); |
1190 | return 0; |
1191 | } |
1192 | |
1193 | static int uvd_v6_0_soft_reset(void *handle) |
1194 | { |
1195 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1196 | u32 srbm_soft_reset; |
1197 | |
1198 | if (!adev->uvd.inst->srbm_soft_reset) |
1199 | return 0; |
1200 | srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; |
1201 | |
1202 | if (srbm_soft_reset) { |
1203 | u32 tmp; |
1204 | |
1205 | tmp = RREG32(mmSRBM_SOFT_RESET)amdgpu_device_rreg(adev, (0x398), 0); |
1206 | tmp |= srbm_soft_reset; |
1207 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp)do { } while(0); |
1208 | WREG32(mmSRBM_SOFT_RESET, tmp)amdgpu_device_wreg(adev, (0x398), (tmp), 0); |
1209 | tmp = RREG32(mmSRBM_SOFT_RESET)amdgpu_device_rreg(adev, (0x398), 0); |
1210 | |
1211 | udelay(50); |
1212 | |
1213 | tmp &= ~srbm_soft_reset; |
1214 | WREG32(mmSRBM_SOFT_RESET, tmp)amdgpu_device_wreg(adev, (0x398), (tmp), 0); |
1215 | tmp = RREG32(mmSRBM_SOFT_RESET)amdgpu_device_rreg(adev, (0x398), 0); |
Value stored to 'tmp' is never read | |
1216 | |
1217 | /* Wait a little for things to settle down */ |
1218 | udelay(50); |
1219 | } |
1220 | |
1221 | return 0; |
1222 | } |
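One way to address the dead-store warning reported at line 1215 is to keep the final read of mmSRBM_SOFT_RESET purely as a posting read instead of assigning it to tmp; alternatively, the trailing readback can simply be dropped if it is not needed. A possible sketch of the tail of the reset sequence, not necessarily how upstream resolved it:

    		tmp &= ~srbm_soft_reset;
    		WREG32(mmSRBM_SOFT_RESET, tmp);
    		/* Read back only to post the write; the returned value is unused. */
    		(void)RREG32(mmSRBM_SOFT_RESET);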
1223 | |
1224 | static int uvd_v6_0_post_soft_reset(void *handle) |
1225 | { |
1226 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1227 | |
1228 | if (!adev->uvd.inst->srbm_soft_reset) |
1229 | return 0; |
1230 | |
1231 | mdelay(5); |
1232 | |
1233 | return uvd_v6_0_start(adev); |
1234 | } |
1235 | |
1236 | static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev, |
1237 | struct amdgpu_irq_src *source, |
1238 | unsigned type, |
1239 | enum amdgpu_interrupt_state state) |
1240 | { |
1241 | // TODO |
1242 | return 0; |
1243 | } |
1244 | |
1245 | static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, |
1246 | struct amdgpu_irq_src *source, |
1247 | struct amdgpu_iv_entry *entry) |
1248 | { |
1249 | bool_Bool int_handled = true1; |
1250 | DRM_DEBUG("IH: UVD TRAP\n")___drm_dbg(((void *)0), DRM_UT_CORE, "IH: UVD TRAP\n"); |
1251 | |
1252 | switch (entry->src_id) { |
1253 | case 124: |
1254 | amdgpu_fence_process(&adev->uvd.inst->ring); |
1255 | break; |
1256 | case 119: |
1257 | if (likely(uvd_v6_0_enc_support(adev))__builtin_expect(!!(uvd_v6_0_enc_support(adev)), 1)) |
1258 | amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]); |
1259 | else |
1260 | int_handled = false0; |
1261 | break; |
1262 | case 120: |
1263 | if (likely(uvd_v6_0_enc_support(adev))__builtin_expect(!!(uvd_v6_0_enc_support(adev)), 1)) |
1264 | amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]); |
1265 | else |
1266 | int_handled = false0; |
1267 | break; |
1268 | } |
1269 | |
1270 | if (!int_handled) |
1271 | DRM_ERROR("Unhandled interrupt: %d %d\n",__drm_err("Unhandled interrupt: %d %d\n", entry->src_id, entry ->src_data[0]) |
1272 | entry->src_id, entry->src_data[0])__drm_err("Unhandled interrupt: %d %d\n", entry->src_id, entry ->src_data[0]); |
1273 | |
1274 | return 0; |
1275 | } |
1276 | |
1277 | static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool_Bool enable) |
1278 | { |
1279 | uint32_t data1, data3; |
1280 | |
1281 | data1 = RREG32(mmUVD_SUVD_CGC_GATE)amdgpu_device_rreg(adev, (0x3be4), 0); |
1282 | data3 = RREG32(mmUVD_CGC_GATE)amdgpu_device_rreg(adev, (0x3d2a), 0); |
1283 | |
1284 | data1 |= UVD_SUVD_CGC_GATE__SRE_MASK0x1 | |
1285 | UVD_SUVD_CGC_GATE__SIT_MASK0x2 | |
1286 | UVD_SUVD_CGC_GATE__SMP_MASK0x4 | |
1287 | UVD_SUVD_CGC_GATE__SCM_MASK0x8 | |
1288 | UVD_SUVD_CGC_GATE__SDB_MASK0x10 | |
1289 | UVD_SUVD_CGC_GATE__SRE_H264_MASK0x20 | |
1290 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK0x40 | |
1291 | UVD_SUVD_CGC_GATE__SIT_H264_MASK0x80 | |
1292 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK0x100 | |
1293 | UVD_SUVD_CGC_GATE__SCM_H264_MASK0x200 | |
1294 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK0x400 | |
1295 | UVD_SUVD_CGC_GATE__SDB_H264_MASK0x800 | |
1296 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK0x1000; |
1297 | |
1298 | if (enable) { |
1299 | data3 |= (UVD_CGC_GATE__SYS_MASK0x1 | |
1300 | UVD_CGC_GATE__UDEC_MASK0x2 | |
1301 | UVD_CGC_GATE__MPEG2_MASK0x4 | |
1302 | UVD_CGC_GATE__RBC_MASK0x10 | |
1303 | UVD_CGC_GATE__LMI_MC_MASK0x20 | |
1304 | UVD_CGC_GATE__LMI_UMC_MASK0x40 | |
1305 | UVD_CGC_GATE__IDCT_MASK0x80 | |
1306 | UVD_CGC_GATE__MPRD_MASK0x100 | |
1307 | UVD_CGC_GATE__MPC_MASK0x200 | |
1308 | UVD_CGC_GATE__LBSI_MASK0x400 | |
1309 | UVD_CGC_GATE__LRBBM_MASK0x800 | |
1310 | UVD_CGC_GATE__UDEC_RE_MASK0x1000 | |
1311 | UVD_CGC_GATE__UDEC_CM_MASK0x2000 | |
1312 | UVD_CGC_GATE__UDEC_IT_MASK0x4000 | |
1313 | UVD_CGC_GATE__UDEC_DB_MASK0x8000 | |
1314 | UVD_CGC_GATE__UDEC_MP_MASK0x10000 | |
1315 | UVD_CGC_GATE__WCB_MASK0x20000 | |
1316 | UVD_CGC_GATE__JPEG_MASK0x100000 | |
1317 | UVD_CGC_GATE__SCPU_MASK0x80000 | |
1318 | UVD_CGC_GATE__JPEG2_MASK0x200000); |
1319 | /* only when PG is enabled can we gate the clock to the VCPU */ |
1320 | if (adev->pg_flags & AMD_PG_SUPPORT_UVD(1 << 3)) |
1321 | data3 |= UVD_CGC_GATE__VCPU_MASK0x40000; |
1322 | |
1323 | data3 &= ~UVD_CGC_GATE__REGS_MASK0x8; |
1324 | } else { |
1325 | data3 = 0; |
1326 | } |
1327 | |
1328 | WREG32(mmUVD_SUVD_CGC_GATE, data1)amdgpu_device_wreg(adev, (0x3be4), (data1), 0); |
1329 | WREG32(mmUVD_CGC_GATE, data3)amdgpu_device_wreg(adev, (0x3d2a), (data3), 0); |
1330 | } |
1331 | |
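/* SW clock gating: select dynamic clock mode, program a gate delay timer of
 * 1 and a clock-off delay of 4, then clear every per-block MODE bit in
 * UVD_CGC_CTRL and UVD_SUVD_CGC_CTRL. */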
1332 | static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev) |
1333 | { |
1334 | uint32_t data, data2; |
1335 | |
1336 | data = RREG32(mmUVD_CGC_CTRL);
1337 | data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
1338 | |
1339 | |
1340 | data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1341 | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1342 | |
1343 | |
1344 | data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1345 | (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1346 | (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1347 | |
1348 | data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1349 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1350 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1351 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1352 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1353 | UVD_CGC_CTRL__SYS_MODE_MASK |
1354 | UVD_CGC_CTRL__UDEC_MODE_MASK |
1355 | UVD_CGC_CTRL__MPEG2_MODE_MASK |
1356 | UVD_CGC_CTRL__REGS_MODE_MASK |
1357 | UVD_CGC_CTRL__RBC_MODE_MASK |
1358 | UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1359 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1360 | UVD_CGC_CTRL__IDCT_MODE_MASK |
1361 | UVD_CGC_CTRL__MPRD_MODE_MASK |
1362 | UVD_CGC_CTRL__MPC_MODE_MASK |
1363 | UVD_CGC_CTRL__LBSI_MODE_MASK |
1364 | UVD_CGC_CTRL__LRBBM_MODE_MASK |
1365 | UVD_CGC_CTRL__WCB_MODE_MASK |
1366 | UVD_CGC_CTRL__VCPU_MODE_MASK |
1367 | UVD_CGC_CTRL__JPEG_MODE_MASK |
1368 | UVD_CGC_CTRL__SCPU_MODE_MASK |
1369 | UVD_CGC_CTRL__JPEG2_MODE_MASK);
1370 | data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1371 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1372 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1373 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1374 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1375 | |
1376 | WREG32(mmUVD_CGC_CTRL, data);
1377 | WREG32(mmUVD_SUVD_CGC_CTRL, data2);
1378 | } |
1379 | |
1380 | #if 0 |
1381 | static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev) |
1382 | { |
1383 | uint32_t data, data1, cgc_flags, suvd_flags; |
1384 | |
1385 | data = RREG32(mmUVD_CGC_GATE);
1386 | data1 = RREG32(mmUVD_SUVD_CGC_GATE);
1387 | |
1388 | cgc_flags = UVD_CGC_GATE__SYS_MASK |
1389 | UVD_CGC_GATE__UDEC_MASK |
1390 | UVD_CGC_GATE__MPEG2_MASK |
1391 | UVD_CGC_GATE__RBC_MASK |
1392 | UVD_CGC_GATE__LMI_MC_MASK |
1393 | UVD_CGC_GATE__IDCT_MASK |
1394 | UVD_CGC_GATE__MPRD_MASK |
1395 | UVD_CGC_GATE__MPC_MASK |
1396 | UVD_CGC_GATE__LBSI_MASK |
1397 | UVD_CGC_GATE__LRBBM_MASK |
1398 | UVD_CGC_GATE__UDEC_RE_MASK |
1399 | UVD_CGC_GATE__UDEC_CM_MASK |
1400 | UVD_CGC_GATE__UDEC_IT_MASK |
1401 | UVD_CGC_GATE__UDEC_DB_MASK |
1402 | UVD_CGC_GATE__UDEC_MP_MASK |
1403 | UVD_CGC_GATE__WCB_MASK |
1404 | UVD_CGC_GATE__VCPU_MASK |
1405 | UVD_CGC_GATE__SCPU_MASK |
1406 | UVD_CGC_GATE__JPEG_MASK |
1407 | UVD_CGC_GATE__JPEG2_MASK;
1408 | |
1409 | suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1410 | UVD_SUVD_CGC_GATE__SIT_MASK |
1411 | UVD_SUVD_CGC_GATE__SMP_MASK |
1412 | UVD_SUVD_CGC_GATE__SCM_MASK |
1413 | UVD_SUVD_CGC_GATE__SDB_MASK;
1414 | |
1415 | data |= cgc_flags; |
1416 | data1 |= suvd_flags; |
1417 | |
1418 | WREG32(mmUVD_CGC_GATE, data);
1419 | WREG32(mmUVD_SUVD_CGC_GATE, data1);
1420 | } |
1421 | #endif |
1422 | |
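/* Medium-grain clock gating: when MGCG is supported and requested, turn on
 * the low 12 memory-gating bits in UVD_CGC_MEM_CTRL and dynamic clock mode
 * in UVD_CGC_CTRL; otherwise clear both. */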
1423 | static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev, |
1424 | bool enable)
1425 | { |
1426 | u32 orig, data; |
1427 | |
1428 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
1429 | data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
1430 | data |= 0xfff;
1431 | WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
1432 | |
1433 | orig = data = RREG32(mmUVD_CGC_CTRL);
1434 | data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
1435 | if (orig != data)
1436 | WREG32(mmUVD_CGC_CTRL, data);
1437 | } else { |
1438 | data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
1439 | data &= ~0xfff;
1440 | WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
1441 | |
1442 | orig = data = RREG32(mmUVD_CGC_CTRL);
1443 | data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
1444 | if (orig != data)
1445 | WREG32(mmUVD_CGC_CTRL, data);
1446 | } |
1447 | } |
1448 | |
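/* Clock gating is only engaged once the block reports idle (-EBUSY if it
 * does not); SW gating is reprogrammed on both the gate and ungate paths. */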
1449 | static int uvd_v6_0_set_clockgating_state(void *handle, |
1450 | enum amd_clockgating_state state) |
1451 | { |
1452 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1453 | bool enable = (state == AMD_CG_STATE_GATE);
1454 | |
1455 | if (enable) { |
1456 | /* wait for STATUS to clear */ |
1457 | if (uvd_v6_0_wait_for_idle(handle)) |
1458 | return -EBUSY;
1459 | uvd_v6_0_enable_clock_gating(adev, true);
1460 | /* enable HW gates because UVD is idle */ |
1461 | /* uvd_v6_0_set_hw_clock_gating(adev); */ |
1462 | } else { |
1463 | /* disable HW gating and enable SW gating */
1464 | uvd_v6_0_enable_clock_gating(adev, false);
1465 | } |
1466 | uvd_v6_0_set_sw_clock_gating(adev); |
1467 | return 0; |
1468 | } |
1469 | |
1470 | static int uvd_v6_0_set_powergating_state(void *handle, |
1471 | enum amd_powergating_state state) |
1472 | { |
1473 | /* This doesn't actually powergate the UVD block. |
1474 | * That's done in the dpm code via the SMC. This |
1475 | * just re-inits the block as necessary. The actual |
1476 | * gating still happens in the dpm code. We should |
1477 | * revisit this when there is a cleaner line between |
1478 | * the smc and the hw blocks |
1479 | */ |
1480 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1481 | int ret = 0; |
1482 | |
1483 | WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1484 | |
1485 | if (state == AMD_PG_STATE_GATE) { |
1486 | uvd_v6_0_stop(adev); |
1487 | } else { |
1488 | ret = uvd_v6_0_start(adev); |
1489 | if (ret) |
1490 | goto out; |
1491 | } |
1492 | |
1493 | out: |
1494 | return ret; |
1495 | } |
1496 | |
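/* Report the currently active clockgating features; the read is skipped
 * while the SMC has UVD powergated. */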
1497 | static void uvd_v6_0_get_clockgating_state(void *handle, u64 *flags) |
1498 | { |
1499 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1500 | int data; |
1501 | |
1502 | mutex_lock(&adev->pm.mutex);
1503 | |
1504 | if (adev->flags & AMD_IS_APU) |
1505 | data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
1506 | else
1507 | data = RREG32_SMC(ixCURRENT_PG_STATUS);
1508 | |
1509 | if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
1510 | DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
1511 | goto out; |
1512 | } |
1513 | |
1514 | /* AMD_CG_SUPPORT_UVD_MGCG */ |
1515 | data = RREG32(mmUVD_CGC_CTRL);
1516 | if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
1517 | *flags |= AMD_CG_SUPPORT_UVD_MGCG;
1518 | |
1519 | out: |
1520 | mutex_unlock(&adev->pm.mutex);
1521 | } |
1522 | |
1523 | static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { |
1524 | .name = "uvd_v6_0", |
1525 | .early_init = uvd_v6_0_early_init, |
1526 | .late_init = NULL,
1527 | .sw_init = uvd_v6_0_sw_init, |
1528 | .sw_fini = uvd_v6_0_sw_fini, |
1529 | .hw_init = uvd_v6_0_hw_init, |
1530 | .hw_fini = uvd_v6_0_hw_fini, |
1531 | .suspend = uvd_v6_0_suspend, |
1532 | .resume = uvd_v6_0_resume, |
1533 | .is_idle = uvd_v6_0_is_idle, |
1534 | .wait_for_idle = uvd_v6_0_wait_for_idle, |
1535 | .check_soft_reset = uvd_v6_0_check_soft_reset, |
1536 | .pre_soft_reset = uvd_v6_0_pre_soft_reset, |
1537 | .soft_reset = uvd_v6_0_soft_reset, |
1538 | .post_soft_reset = uvd_v6_0_post_soft_reset, |
1539 | .set_clockgating_state = uvd_v6_0_set_clockgating_state, |
1540 | .set_powergating_state = uvd_v6_0_set_powergating_state, |
1541 | .get_clockgating_state = uvd_v6_0_get_clockgating_state, |
1542 | }; |
1543 | |
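/* Decode ring callbacks for physical addressing; installed on parts before
 * Polaris10 (see uvd_v6_0_set_ring_funcs() below). */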
1544 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { |
1545 | .type = AMDGPU_RING_TYPE_UVD, |
1546 | .align_mask = 0xf, |
1547 | .support_64bit_ptrs = false,
1548 | .no_user_fence = true,
1549 | .get_rptr = uvd_v6_0_ring_get_rptr, |
1550 | .get_wptr = uvd_v6_0_ring_get_wptr, |
1551 | .set_wptr = uvd_v6_0_ring_set_wptr, |
1552 | .parse_cs = amdgpu_uvd_ring_parse_cs, |
1553 | .emit_frame_size = |
1554 | 6 + /* hdp invalidate */ |
1555 | 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ |
1556 | 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */ |
1557 | .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */ |
1558 | .emit_ib = uvd_v6_0_ring_emit_ib, |
1559 | .emit_fence = uvd_v6_0_ring_emit_fence, |
1560 | .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, |
1561 | .test_ring = uvd_v6_0_ring_test_ring, |
1562 | .test_ib = amdgpu_uvd_ring_test_ib, |
1563 | .insert_nop = uvd_v6_0_ring_insert_nop, |
1564 | .pad_ib = amdgpu_ring_generic_pad_ib, |
1565 | .begin_use = amdgpu_uvd_ring_begin_use, |
1566 | .end_use = amdgpu_uvd_ring_end_use, |
1567 | .emit_wreg = uvd_v6_0_ring_emit_wreg, |
1568 | }; |
1569 | |
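/* Decode ring callbacks for VM mode on Polaris10 and newer; unlike the
 * physical-mode table these also emit pipeline syncs and VM flushes. */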
1570 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { |
1571 | .type = AMDGPU_RING_TYPE_UVD, |
1572 | .align_mask = 0xf, |
1573 | .support_64bit_ptrs = false,
1574 | .no_user_fence = true,
1575 | .get_rptr = uvd_v6_0_ring_get_rptr, |
1576 | .get_wptr = uvd_v6_0_ring_get_wptr, |
1577 | .set_wptr = uvd_v6_0_ring_set_wptr, |
1578 | .emit_frame_size = |
1579 | 6 + /* hdp invalidate */ |
1580 | 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ |
1581 | VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
1582 | 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */ |
1583 | .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */ |
1584 | .emit_ib = uvd_v6_0_ring_emit_ib, |
1585 | .emit_fence = uvd_v6_0_ring_emit_fence, |
1586 | .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, |
1587 | .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync, |
1588 | .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, |
1589 | .test_ring = uvd_v6_0_ring_test_ring, |
1590 | .test_ib = amdgpu_uvd_ring_test_ib, |
1591 | .insert_nop = uvd_v6_0_ring_insert_nop, |
1592 | .pad_ib = amdgpu_ring_generic_pad_ib, |
1593 | .begin_use = amdgpu_uvd_ring_begin_use, |
1594 | .end_use = amdgpu_uvd_ring_end_use, |
1595 | .emit_wreg = uvd_v6_0_ring_emit_wreg, |
1596 | }; |
1597 | |
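/* Encode ring callbacks; installed on the encode rings by
 * uvd_v6_0_set_enc_ring_funcs() below. */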
1598 | static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = { |
1599 | .type = AMDGPU_RING_TYPE_UVD_ENC, |
1600 | .align_mask = 0x3f, |
1601 | .nop = HEVC_ENC_CMD_NO_OP,
1602 | .support_64bit_ptrs = false,
1603 | .no_user_fence = true,
1604 | .get_rptr = uvd_v6_0_enc_ring_get_rptr, |
1605 | .get_wptr = uvd_v6_0_enc_ring_get_wptr, |
1606 | .set_wptr = uvd_v6_0_enc_ring_set_wptr, |
1607 | .emit_frame_size = |
1608 | 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */ |
1609 | 5 + /* uvd_v6_0_enc_ring_emit_vm_flush */ |
1610 | 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */ |
1611 | 1, /* uvd_v6_0_enc_ring_insert_end */ |
1612 | .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */ |
1613 | .emit_ib = uvd_v6_0_enc_ring_emit_ib, |
1614 | .emit_fence = uvd_v6_0_enc_ring_emit_fence, |
1615 | .emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush, |
1616 | .emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync, |
1617 | .test_ring = uvd_v6_0_enc_ring_test_ring, |
1618 | .test_ib = uvd_v6_0_enc_ring_test_ib, |
1619 | .insert_nop = amdgpu_ring_insert_nop, |
1620 | .insert_end = uvd_v6_0_enc_ring_insert_end, |
1621 | .pad_ib = amdgpu_ring_generic_pad_ib, |
1622 | .begin_use = amdgpu_uvd_ring_begin_use, |
1623 | .end_use = amdgpu_uvd_ring_end_use, |
1624 | }; |
1625 | |
1626 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) |
1627 | { |
1628 | if (adev->asic_type >= CHIP_POLARIS10) { |
1629 | adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs; |
1630 | DRM_INFO("UVD is enabled in VM mode\n")printk("\0016" "[" "drm" "] " "UVD is enabled in VM mode\n"); |
1631 | } else { |
1632 | adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs; |
1633 | DRM_INFO("UVD is enabled in physical mode\n")printk("\0016" "[" "drm" "] " "UVD is enabled in physical mode\n" ); |
1634 | } |
1635 | } |
1636 | |
1637 | static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev) |
1638 | { |
1639 | int i; |
1640 | |
1641 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) |
1642 | adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; |
1643 | |
1644 | DRM_INFO("UVD ENC is enabled in VM mode\n")printk("\0016" "[" "drm" "] " "UVD ENC is enabled in VM mode\n" ); |
1645 | } |
1646 | |
1647 | static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { |
1648 | .set = uvd_v6_0_set_interrupt_state, |
1649 | .process = uvd_v6_0_process_interrupt, |
1650 | }; |
1651 | |
1652 | static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) |
1653 | { |
1654 | if (uvd_v6_0_enc_support(adev)) |
1655 | adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1; |
1656 | else |
1657 | adev->uvd.inst->irq.num_types = 1; |
1658 | |
1659 | adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs; |
1660 | } |
1661 | |
1662 | const struct amdgpu_ip_block_version uvd_v6_0_ip_block = |
1663 | { |
1664 | .type = AMD_IP_BLOCK_TYPE_UVD, |
1665 | .major = 6, |
1666 | .minor = 0, |
1667 | .rev = 0, |
1668 | .funcs = &uvd_v6_0_ip_funcs, |
1669 | }; |
1670 | |
1671 | const struct amdgpu_ip_block_version uvd_v6_2_ip_block = |
1672 | { |
1673 | .type = AMD_IP_BLOCK_TYPE_UVD, |
1674 | .major = 6, |
1675 | .minor = 2, |
1676 | .rev = 0, |
1677 | .funcs = &uvd_v6_0_ip_funcs, |
1678 | }; |
1679 | |
1680 | const struct amdgpu_ip_block_version uvd_v6_3_ip_block = |
1681 | { |
1682 | .type = AMD_IP_BLOCK_TYPE_UVD, |
1683 | .major = 6, |
1684 | .minor = 3, |
1685 | .rev = 0, |
1686 | .funcs = &uvd_v6_0_ip_funcs, |
1687 | }; |