Bug Summary

File: dev/pci/drm/amd/amdgpu/vcn_v1_0.c
Warning: line 1326, column 5
Value stored to 'ring' is never read
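
This diagnostic comes from the deadcode.DeadStores checker enabled in the
invocation below: it flags an assignment whose value is never read before
being overwritten or going out of scope. As a minimal, self-contained sketch
of the pattern (hypothetical code, not the flagged driver statement, which is
at line 1326 and outside the excerpt shown here):

    /* dead_store_demo.c - analyze with: clang --analyze dead_store_demo.c */
    #include <stdio.h>

    int main(void)
    {
            int ring = 1;           /* this store is read below: no warning */
            printf("%d\n", ring);

            ring = 2;               /* flagged: "Value stored to 'ring' is
                                     * never read" - the function returns
                                     * before any use of this value */
            return 0;
    }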

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name vcn_v1_0.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/amd/amdgpu/vcn_v1_0.c
  1  /*
  2   * Copyright 2016 Advanced Micro Devices, Inc.
  3   *
  4   * Permission is hereby granted, free of charge, to any person obtaining a
  5   * copy of this software and associated documentation files (the "Software"),
  6   * to deal in the Software without restriction, including without limitation
  7   * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8   * and/or sell copies of the Software, and to permit persons to whom the
  9   * Software is furnished to do so, subject to the following conditions:
 10   *
 11   * The above copyright notice and this permission notice shall be included in
 12   * all copies or substantial portions of the Software.
 13   *
 14   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 17   * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18   * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20   * OTHER DEALINGS IN THE SOFTWARE.
 21   *
 22   */
 23
 24  #include <linux/firmware.h>
 25
 26  #include "amdgpu.h"
 27  #include "amdgpu_vcn.h"
 28  #include "amdgpu_pm.h"
 29  #include "soc15.h"
 30  #include "soc15d.h"
 31  #include "soc15_common.h"
 32
 33  #include "vcn/vcn_1_0_offset.h"
 34  #include "vcn/vcn_1_0_sh_mask.h"
 35  #include "hdp/hdp_4_0_offset.h"
 36  #include "mmhub/mmhub_9_1_offset.h"
 37  #include "mmhub/mmhub_9_1_sh_mask.h"
 38
 39  #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
 40  #include "jpeg_v1_0.h"
 41  #include "vcn_v1_0.h"
 42
 43  #define mmUVD_RBC_XX_IB_REG_CHECK_1_0           0x05ab
 44  #define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX  1
 45  #define mmUVD_REG_XX_MASK_1_0                   0x05ac
 46  #define mmUVD_REG_XX_MASK_1_0_BASE_IDX          1
 47
 48  static int vcn_v1_0_stop(struct amdgpu_device *adev);
 49  static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 50  static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
 51  static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
 52  static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
 53  static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 54                  int inst_idx, struct dpg_pause_state *new_state);
 55
 56  static void vcn_v1_0_idle_work_handler(struct work_struct *work);
 57  static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
 58
 59  /**
 60   * vcn_v1_0_early_init - set function pointers
 61   *
 62   * @handle: amdgpu_device pointer
 63   *
 64   * Set ring and irq function pointers
 65   */
 66  static int vcn_v1_0_early_init(void *handle)
 67  {
 68          struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 69
 70          adev->vcn.num_vcn_inst = 1;
 71          adev->vcn.num_enc_rings = 2;
 72
 73          vcn_v1_0_set_dec_ring_funcs(adev);
 74          vcn_v1_0_set_enc_ring_funcs(adev);
 75          vcn_v1_0_set_irq_funcs(adev);
 76
 77          jpeg_v1_0_early_init(handle);
 78
 79          return 0;
 80  }
 81
 82  /**
 83   * vcn_v1_0_sw_init - sw init for VCN block
 84   *
 85   * @handle: amdgpu_device pointer
 86   *
 87   * Load firmware and sw initialization
 88   */
 89  static int vcn_v1_0_sw_init(void *handle)
 90  {
 91          struct amdgpu_ring *ring;
 92          int i, r;
 93          struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 94
 95          /* VCN DEC TRAP */
 96          r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 97                          VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
 98          if (r)
 99                  return r;
100
101          /* VCN ENC TRAP */
102          for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
103                  r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
104                                  &adev->vcn.inst->irq);
105                  if (r)
106                          return r;
107          }
108
109          r = amdgpu_vcn_sw_init(adev);
110          if (r)
111                  return r;
112
113          /* Override the work func */
114  #ifdef __linux__
115          adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
116  #else
117          task_set(&adev->vcn.idle_work.work.task,
118              (void (*)(void *))vcn_v1_0_idle_work_handler,
119              &adev->vcn.idle_work.work);
120  #endif
121
122          if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
123                  const struct common_firmware_header *hdr;
124                  hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
125                  adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
126                  adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
127                  adev->firmware.fw_size +=
128                          roundup2(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
129                  dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
130          }
131
132          r = amdgpu_vcn_resume(adev);
133          if (r)
134                  return r;
135
136          ring = &adev->vcn.inst->ring_dec;
137          snprintf(ring->name, sizeof(ring->name), "vcn_dec");
138          r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
139                               AMDGPU_RING_PRIO_DEFAULT);
140          if (r)
141                  return r;
142
143          adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
144                  SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
145          adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
146                  SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
147          adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
148                  SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
149          adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
150                  SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
151          adev->vcn.internal.nop = adev->vcn.inst->external.nop =
152                  SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
153
154          for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
155                  ring = &adev->vcn.inst->ring_enc[i];
156                  snprintf(ring->name, sizeof(ring->name), "vcn_enc%d", i);
157                  r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
158                                       AMDGPU_RING_PRIO_DEFAULT);
159                  if (r)
160                          return r;
161          }
162
163          adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
164
165          r = jpeg_v1_0_sw_init(handle);
166
167          return r;
168  }
169
170  /**
171   * vcn_v1_0_sw_fini - sw fini for VCN block
172   *
173   * @handle: amdgpu_device pointer
174   *
175   * VCN suspend and free up sw allocation
176   */
177  static int vcn_v1_0_sw_fini(void *handle)
178  {
179          int r;
180          struct amdgpu_device *adev = (struct amdgpu_device *)handle;
181
182          r = amdgpu_vcn_suspend(adev);
183          if (r)
184                  return r;
185
186          jpeg_v1_0_sw_fini(handle);
187
188          r = amdgpu_vcn_sw_fini(adev);
189
190          return r;
191  }
192
193  /**
194   * vcn_v1_0_hw_init - start and test VCN block
195   *
196   * @handle: amdgpu_device pointer
197   *
198   * Initialize the hardware, boot up the VCPU and do some testing
199   */
200  static int vcn_v1_0_hw_init(void *handle)
201  {
202          struct amdgpu_device *adev = (struct amdgpu_device *)handle;
203          struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
204          int i, r;
205
206          r = amdgpu_ring_test_helper(ring);
207          if (r)
208                  goto done;
209
210          for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
211                  ring = &adev->vcn.inst->ring_enc[i];
212                  r = amdgpu_ring_test_helper(ring);
213                  if (r)
214                          goto done;
215          }
216
217          ring = &adev->jpeg.inst->ring_dec;
218          r = amdgpu_ring_test_helper(ring);
219          if (r)
220                  goto done;
221
222  done:
223          if (!r)
224                  DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
225                          (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");
226
227          return r;
228  }
229
230  /**
231   * vcn_v1_0_hw_fini - stop the hardware block
232   *
233   * @handle: amdgpu_device pointer
234   *
235   * Stop the VCN block, mark ring as not ready any more
236   */
237  static int vcn_v1_0_hw_fini(void *handle)
238  {
239          struct amdgpu_device *adev = (struct amdgpu_device *)handle;
240
241          cancel_delayed_work_sync(&adev->vcn.idle_work);
242
243          if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
244                  (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
245                   RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
246                  vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
247          }
248
249          return 0;
250  }
251
252  /**
253   * vcn_v1_0_suspend - suspend VCN block
254   *
255   * @handle: amdgpu_device pointer
256   *
257   * HW fini and suspend VCN block
258   */
259  static int vcn_v1_0_suspend(void *handle)
260  {
261          int r;
262          struct amdgpu_device *adev = (struct amdgpu_device *)handle;
263          bool idle_work_unexecuted;
264
265          idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
266          if (idle_work_unexecuted) {
267                  if (adev->pm.dpm_enabled)
268                          amdgpu_dpm_enable_uvd(adev, false);
269          }
270
271          r = vcn_v1_0_hw_fini(adev);
272          if (r)
273                  return r;
274
275          r = amdgpu_vcn_suspend(adev);
276
277          return r;
278  }
279
280  /**
281   * vcn_v1_0_resume - resume VCN block
282   *
283   * @handle: amdgpu_device pointer
284   *
285   * Resume firmware and hw init VCN block
286   */
287  static int vcn_v1_0_resume(void *handle)
288  {
289          int r;
290          struct amdgpu_device *adev = (struct amdgpu_device *)handle;
291
292          r = amdgpu_vcn_resume(adev);
293          if (r)
294                  return r;
295
296          r = vcn_v1_0_hw_init(adev);
297
298          return r;
299  }
300
301  /**
302   * vcn_v1_0_mc_resume_spg_mode - memory controller programming
303   *
304   * @adev: amdgpu_device pointer
305   *
306   * Let the VCN memory controller know its offsets
307   */
308  static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
309  {
310          uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
311          uint32_t offset;
312
313          /* cache window 0: fw */
314          if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
315                  WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
316                          (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
317                  WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
318                          (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
319                  WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
320                  offset = 0;
321          } else {
322                  WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
323                          lower_32_bits(adev->vcn.inst->gpu_addr));
324                  WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
325                          upper_32_bits(adev->vcn.inst->gpu_addr));
326                  offset = size;
327                  WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
328                          AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
329          }
330
331          WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
332
333          /* cache window 1: stack */
334          WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
335                  lower_32_bits(adev->vcn.inst->gpu_addr + offset));
336          WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
337                  upper_32_bits(adev->vcn.inst->gpu_addr + offset));
338          WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
339          WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
340
341          /* cache window 2: context */
342          WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
343                  lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
344          WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
345                  upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
346          WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
347          WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
348
349          WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
350                  adev->gfx.config.gb_addr_config);
351          WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
352                  adev->gfx.config.gb_addr_config);
353          WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
354                  adev->gfx.config.gb_addr_config);
355          WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
356                  adev->gfx.config.gb_addr_config);
357          WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
358                  adev->gfx.config.gb_addr_config);
359          WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
360                  adev->gfx.config.gb_addr_config);
361          WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
362                  adev->gfx.config.gb_addr_config);
363          WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
364                  adev->gfx.config.gb_addr_config);
365          WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
366                  adev->gfx.config.gb_addr_config);
367          WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
368                  adev->gfx.config.gb_addr_config);
369          WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
370                  adev->gfx.config.gb_addr_config);
371          WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
372                  adev->gfx.config.gb_addr_config);
373  }
374
375  static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
376  {
377          uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
378          uint32_t offset;
379
380          /* cache window 0: fw */
381          if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
382                  WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
383                          (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
384                          0xFFFFFFFF, 0);
385                  WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
386                          (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
387                          0xFFFFFFFF, 0);
388                  WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
389                          0xFFFFFFFF, 0);
390                  offset = 0;
391          } else {
392                  WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
393                          lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
394                  WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
395                          upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
396                  offset = size;
397                  WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
398                          AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
399          }
400
401          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
402
403          /* cache window 1: stack */
404          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
405                  lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
406          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
407                  upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
408          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
409                  0xFFFFFFFF, 0);
410          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
411                  0xFFFFFFFF, 0);
412
413          /* cache window 2: context */
414          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
415                  lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
416                  0xFFFFFFFF, 0);
417          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
418                  upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
419                  0xFFFFFFFF, 0);
420          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
421          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
422                  0xFFFFFFFF, 0);
423
424          /* VCN global tiling registers */
425          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
426                  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
427          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
428                  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
429          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
430                  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
431          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
432                  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
433          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
434                  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
435          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
436                  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
437          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
438                  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
439          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
440                  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
441          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
442                  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
443          WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
444                  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
445  }
446
447  /**
448   * vcn_v1_0_disable_clock_gating - disable VCN clock gating
449   *
450   * @adev: amdgpu_device pointer
451   * @sw: enable SW clock gating
452   *
453   * Disable clock gating for VCN block
454   */
455  static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
456  {
457          uint32_t data;
458
459          /* JPEG disable CGC */
460          data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
461
462          if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
463                  data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
464          else
465                  data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
466
467          data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
468          data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
469          WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
470
471          data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
472          data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
473          WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
474
475          /* UVD disable CGC */
476          data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
477          if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
478                  data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
479          else
480                  data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
481
482          data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
483          data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
484          WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
485
486          data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
487          data &= ~(UVD_CGC_GATE__SYS_MASK
488                  | UVD_CGC_GATE__UDEC_MASK
489                  | UVD_CGC_GATE__MPEG2_MASK
490                  | UVD_CGC_GATE__REGS_MASK
491                  | UVD_CGC_GATE__RBC_MASK
492                  | UVD_CGC_GATE__LMI_MC_MASK
493                  | UVD_CGC_GATE__LMI_UMC_MASK
494                  | UVD_CGC_GATE__IDCT_MASK
495                  | UVD_CGC_GATE__MPRD_MASK
496                  | UVD_CGC_GATE__MPC_MASK
497                  | UVD_CGC_GATE__LBSI_MASK
498                  | UVD_CGC_GATE__LRBBM_MASK
499                  | UVD_CGC_GATE__UDEC_RE_MASK
500                  | UVD_CGC_GATE__UDEC_CM_MASK
501                  | UVD_CGC_GATE__UDEC_IT_MASK
502                  | UVD_CGC_GATE__UDEC_DB_MASK
503                  | UVD_CGC_GATE__UDEC_MP_MASK
504                  | UVD_CGC_GATE__WCB_MASK
505                  | UVD_CGC_GATE__VCPU_MASK
506                  | UVD_CGC_GATE__SCPU_MASK);
507          WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);
508
509          data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
510          data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
511                  | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
512                  | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
513                  | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
514                  | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
515                  | UVD_CGC_CTRL__SYS_MODE_MASK
516                  | UVD_CGC_CTRL__UDEC_MODE_MASK
517                  | UVD_CGC_CTRL__MPEG2_MODE_MASK
518                  | UVD_CGC_CTRL__REGS_MODE_MASK
519                  | UVD_CGC_CTRL__RBC_MODE_MASK
520                  | UVD_CGC_CTRL__LMI_MC_MODE_MASK
521                  | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
522                  | UVD_CGC_CTRL__IDCT_MODE_MASK
523                  | UVD_CGC_CTRL__MPRD_MODE_MASK
524                  | UVD_CGC_CTRL__MPC_MODE_MASK
525                  | UVD_CGC_CTRL__LBSI_MODE_MASK
526                  | UVD_CGC_CTRL__LRBBM_MODE_MASK
527                  | UVD_CGC_CTRL__WCB_MODE_MASK
528                  | UVD_CGC_CTRL__VCPU_MODE_MASK
529                  | UVD_CGC_CTRL__SCPU_MODE_MASK);
530          WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
531
532          /* turn on */
533          data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
534          data |= (UVD_SUVD_CGC_GATE__SRE_MASK
535                  | UVD_SUVD_CGC_GATE__SIT_MASK
536                  | UVD_SUVD_CGC_GATE__SMP_MASK
537                  | UVD_SUVD_CGC_GATE__SCM_MASK
538                  | UVD_SUVD_CGC_GATE__SDB_MASK
539                  | UVD_SUVD_CGC_GATE__SRE_H264_MASK
540                  | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
541                  | UVD_SUVD_CGC_GATE__SIT_H264_MASK
542                  | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
543                  | UVD_SUVD_CGC_GATE__SCM_H264_MASK
544                  | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
545                  | UVD_SUVD_CGC_GATE__SDB_H264_MASK
546                  | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
547                  | UVD_SUVD_CGC_GATE__SCLR_MASK
548                  | UVD_SUVD_CGC_GATE__UVD_SC_MASK
549                  | UVD_SUVD_CGC_GATE__ENT_MASK
550                  | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
551                  | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
552                  | UVD_SUVD_CGC_GATE__SITE_MASK
553                  | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
554                  | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
555                  | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
556                  | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
557                  | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
558          WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);
559
560          data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
561          data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
562                  | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
563                  | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
564                  | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
565                  | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
566                  | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
567                  | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
568                  | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
569                  | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
570                  | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
571          WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
572  }
573
574  /**
575   * vcn_v1_0_enable_clock_gating - enable VCN clock gating
576   *
577   * @adev: amdgpu_device pointer
578   * @sw: enable SW clock gating
579   *
580   * Enable clock gating for VCN block
581   */
582  static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
583  {
584          uint32_t data = 0;
585
586          /* enable JPEG CGC */
587          data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
588          if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
589                  data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
590          else
591                  data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
592          data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
593          data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
594          WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);
595
596          data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
597          data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
598          WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);
599
600          /* enable UVD CGC */
601          data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
602          if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
603                  data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
604          else
605                  data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
606          data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
607          data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
608          WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
609
610          data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
611          data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
612                  | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
613                  | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
614                  | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
615                  | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
616                  | UVD_CGC_CTRL__SYS_MODE_MASK
617                  | UVD_CGC_CTRL__UDEC_MODE_MASK
618                  | UVD_CGC_CTRL__MPEG2_MODE_MASK
619                  | UVD_CGC_CTRL__REGS_MODE_MASK
620                  | UVD_CGC_CTRL__RBC_MODE_MASK
621                  | UVD_CGC_CTRL__LMI_MC_MODE_MASK
622                  | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
623                  | UVD_CGC_CTRL__IDCT_MODE_MASK
624                  | UVD_CGC_CTRL__MPRD_MODE_MASK
625                  | UVD_CGC_CTRL__MPC_MODE_MASK
626                  | UVD_CGC_CTRL__LBSI_MODE_MASK
627                  | UVD_CGC_CTRL__LRBBM_MODE_MASK
628                  | UVD_CGC_CTRL__WCB_MODE_MASK
629                  | UVD_CGC_CTRL__VCPU_MODE_MASK
630                  | UVD_CGC_CTRL__SCPU_MODE_MASK);
631          WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);
632
633          data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
634          data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
635                  | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
636                  | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
637                  | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
638                  | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
639                  | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
640                  | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
641                  | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
642                  | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
643                  | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
644          WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
645  }
646
647 static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
648 {
649 	uint32_t reg_data = 0;
650 
651 	/* disable JPEG CGC */
652 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
653 		reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
654 	else
655 		reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
656 	reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
657 	reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
658 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
659 
660 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
661 
662 	/* enable sw clock gating control */
663 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
664 		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
665 	else
666 		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
667 	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
668 	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
669 	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
670 		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
671 		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
672 		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
673 		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
674 		UVD_CGC_CTRL__SYS_MODE_MASK |
675 		UVD_CGC_CTRL__UDEC_MODE_MASK |
676 		UVD_CGC_CTRL__MPEG2_MODE_MASK |
677 		UVD_CGC_CTRL__REGS_MODE_MASK |
678 		UVD_CGC_CTRL__RBC_MODE_MASK |
679 		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
680 		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
681 		UVD_CGC_CTRL__IDCT_MODE_MASK |
682 		UVD_CGC_CTRL__MPRD_MODE_MASK |
683 		UVD_CGC_CTRL__MPC_MODE_MASK |
684 		UVD_CGC_CTRL__LBSI_MODE_MASK |
685 		UVD_CGC_CTRL__LRBBM_MODE_MASK |
686 		UVD_CGC_CTRL__WCB_MODE_MASK |
687 		UVD_CGC_CTRL__VCPU_MODE_MASK |
688 		UVD_CGC_CTRL__SCPU_MODE_MASK);
689 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
690 
691 	/* turn off clock gating */
692 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
693 
694 	/* turn on SUVD clock gating */
695 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
696 
697 	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
698 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
699 }
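/* Annotation: per the expansions recorded in this report,
 * WREG32_SOC15_DPG_MODE_1_0() performs an indirect register write through
 * the DPG LMA interface: the value goes to mmUVD_DPG_LMA_DATA
 * (seg 1 + 0x00d2), the mask to mmUVD_DPG_LMA_MASK (+0x00d3), and
 * mmUVD_DPG_LMA_CTL (+0x00d1) is then written with READ_WRITE (bit 0),
 * the target register offset in bits 31:16 and sram_sel in bit 4 to
 * commit the access. */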
700
701 static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
702 {
703 	uint32_t data = 0;
704 
705 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
706 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
707 			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
708 			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
709 			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
710 			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
711 			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
712 			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
713 			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
714 			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
715 			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
716 			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
717 
718 		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
719 		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF);
720 	} else {
721 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
722 			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
723 			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
724 			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
725 			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
726 			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
727 			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
728 			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
729 			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
730 			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
731 			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
732 		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
733 		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF);
734 	}
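/* Annotation: SOC15_WAIT_ON_RREG() (expanded above in this report) polls the
 * register with udelay(1) between reads and restarts its adev->usec_timeout
 * budget whenever the value changes; if (status & mask) never reaches the
 * expected value it logs a DRM error and yields -60 (ETIMEDOUT). The return
 * value is discarded at these call sites. */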
735
736 	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */
737 
738 	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
739 	data &= ~0x103;
740 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
741 		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;
742 
743 	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
744 }
745
746 static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
747 {
748 	uint32_t data = 0;
749 
750 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
751 		/* Before power off, this indicator has to be turned on */
752 		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
753 		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
754 		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
755 		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
756 
757 
758 		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
759 			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
760 			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
761 			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
762 			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
763 			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
764 			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
765 			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
766 			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
767 			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
768 			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
769 
770 		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
771 
772 		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
773 			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
774 			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
775 			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
776 			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
777 			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
778 			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
779 			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
780 			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
781 			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
782 			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
783 		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF);
784 	}
785 }
786
787 /**
788  * vcn_v1_0_start - start VCN block
789  *
790  * @adev: amdgpu_device pointer
791  *
792  * Setup and start the VCN block
793  */
794 static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
795 {
796 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
797 	uint32_t rb_bufsz, tmp;
798 	uint32_t lmi_swap_cntl;
799 	int i, j, r;
800 
801 	/* disable byte swapping */
802 	lmi_swap_cntl = 0;
803 
804 	vcn_1_0_disable_static_power_gating(adev);
805 
806 	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
807 	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
808 
809 	/* disable clock gating */
810 	vcn_v1_0_disable_clock_gating(adev);
811 
812 	/* disable interrupt */
813 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
814 		~UVD_MASTINT_EN__VCPU_EN_MASK);
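/* Annotation: WREG32_P(reg, val, mask) is a read-modify-write; per the
 * expansion above it keeps the bits selected by mask and merges in
 * (val & ~mask), so the call above clears only UVD_MASTINT_EN.VCPU_EN. */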
815
816 	/* initialize VCN memory controller */
817 	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
818 	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
819 		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
820 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
821 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
822 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
823 
824 #ifdef __BIG_ENDIAN
825 	/* swap (8 in 32) RB and IB */
826 	lmi_swap_cntl = 0xa;
827 #endif
828 	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
829 
830 	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
831 	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
832 	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
833 	WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);
834 
835 	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
836 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
837 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
838 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
839 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
840 
841 	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
842 		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
843 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
844 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
845 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
846 
847 	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
848 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
849 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
850 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
851 
852 	vcn_v1_0_mc_resume_spg_mode(adev);
853 
854 	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
855 	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
856 		RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);
857 
858 	/* enable VCPU clock */
859 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
860 
861 	/* boot up the VCPU */
862 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
863 		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
864 
865 	/* enable UMC */
866 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
867 		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
868
869 	tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
870 	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
871 	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
872 	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);
873 
874 	for (i = 0; i < 10; ++i) {
875 		uint32_t status;
876 
877 		for (j = 0; j < 100; ++j) {
878 			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
879 			if (status & UVD_STATUS__IDLE)
880 				break;
881 			mdelay(10);
882 		}
883 		r = 0;
884 		if (status & UVD_STATUS__IDLE)
885 			break;
886 
887 		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
888 		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
889 			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
890 			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
891 		mdelay(10);
892 		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
893 			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
894 		mdelay(10);
895 		r = -1;
896 	}
897 
898 	if (r) {
899 		DRM_ERROR("VCN decode not responding, giving up!!!\n");
900 		return r;
901 	}
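/* Annotation: the boot handshake above waits up to 100 x 10 ms per attempt
 * and retries the VCPU soft reset up to 10 times; on final failure r is the
 * bare value -1 rather than an errno constant, exactly as in the source. */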
902 	/* enable master interrupt */
903 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
904 		UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);
905 
906 	/* enable system interrupt for JRBC, TODO: move to set interrupt */
907 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
908 		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
909 		~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);
910 
911 	/* clear the busy bit of UVD_STATUS */
912 	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
913 	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);
914 
915 	/* force RBC into idle state */
916 	rb_bufsz = order_base_2(ring->ring_size);
917 	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
918 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
919 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
920 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
921 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
922 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
923 
924 	/* set the write pointer delay */
925 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
926 
927 	/* set the wb address */
928 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
929 		(upper_32_bits(ring->gpu_addr) >> 2));
930 
931 	/* program the RB_BASE for ring buffer */
932 	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
933 		lower_32_bits(ring->gpu_addr));
934 	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
935 		upper_32_bits(ring->gpu_addr));
936 
937 	/* Initialize the ring buffer's read and write pointers */
938 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
939 
940 	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
941 
942 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
943 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
944 		lower_32_bits(ring->wptr));
945 
946 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
947 		~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
948 
949 	ring = &adev->vcn.inst->ring_enc[0];
950 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
951 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
952 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
953 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
954 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
955 
956 	ring = &adev->vcn.inst->ring_enc[1];
957 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
958 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
959 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
960 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
961 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
962 
963 	jpeg_v1_0_start(adev, 0);
964 
965 	return 0;
966 }
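/* Annotation: each ring is programmed with its GPU VA split across the
 * 64-bit BAR LOW/HIGH pair, rptr and wptr initialized to the same value
 * (an empty ring), and the size registers written as ring_size / 4,
 * i.e. in 32-bit words assuming ring_size is a byte count. */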
967
968 static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
969 {
970 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
971 	uint32_t rb_bufsz, tmp;
972 	uint32_t lmi_swap_cntl;
973 
974 	/* disable byte swapping */
975 	lmi_swap_cntl = 0;
976 
977 	vcn_1_0_enable_static_power_gating(adev);
978 
979 	/* enable dynamic power gating mode */
980 	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
981 	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
982 	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
983 	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
984 
985 	/* enable clock gating */
986 	vcn_v1_0_clock_gating_dpg_mode(adev, 0);
987 
988 	/* enable VCPU clock */
989 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
990 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
991 	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
992 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);
993 
994 	/* disable interrupt */
995 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
996 		0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
997 
998 	/* initialize VCN memory controller */
999 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
1000 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1001 		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1002 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1003 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1004 		UVD_LMI_CTRL__REQ_MODE_MASK |
1005 		UVD_LMI_CTRL__CRC_RESET_MASK |
1006 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1007 		0x00100000L, 0xFFFFFFFF, 0);
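/* Annotation: the trailing 0x00100000L above is bit 20 kept as a raw
 * constant in the source rather than a named *_MASK define. */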
1008
1009 #ifdef __BIG_ENDIAN
1010 	/* swap (8 in 32) RB and IB */
1011 	lmi_swap_cntl = 0xa;
1012 #endif
1013 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
1014 
1015 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL,
1016 		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);
1017 
1018 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0,
1019 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1020 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1021 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1022 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);
1023 
1024 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0,
1025 		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1026 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1027 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1028 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);
1029 
1030 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX,
1031 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1032 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1033 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
1034 
1035 	vcn_v1_0_mc_resume_dpg_mode(adev);
1036 
1037 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
1038 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
1039 
1040 	/* boot up the VCPU */
1041 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
1042 
1043 	/* enable UMC */
1044 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2,
1045 		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
1046 		0xFFFFFFFF, 0);
1047 
1048 	/* enable master interrupt */
1049 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
1050 		UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
1051 
1052 	vcn_v1_0_clock_gating_dpg_mode(adev, 1);
1053 	/* setup mmUVD_LMI_CTRL */
1054 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
1055 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1056 		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1057 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1058 		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1059 		UVD_LMI_CTRL__REQ_MODE_MASK |
1060 		UVD_LMI_CTRL__CRC_RESET_MASK |
1061 		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1062 		0x00100000L, 0xFFFFFFFF, 1);
1063 
1064 	tmp = adev->gfx.config.gb_addr_config;
1065 	/* setup VCN global tiling registers */
1066 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1067 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
1068 
1069 	/* enable System Interrupt for JRBC */
1070 	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN,
1071 		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);
1072 
1073 	/* force RBC into idle state */
1074 	rb_bufsz = order_base_2(ring->ring_size);
1075 	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1076 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1077 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1078 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1079 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1080 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
1081 
1082 	/* set the write pointer delay */
1083 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
1084 
1085 	/* set the wb address */
1086 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
1087 		(upper_32_bits(ring->gpu_addr) >> 2));
1088 
1089 	/* program the RB_BASE for ring buffer */
1090 	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1091 		lower_32_bits(ring->gpu_addr));
1092 	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1093 		upper_32_bits(ring->gpu_addr));
1094 
1095 	/* Initialize the ring buffer's read and write pointers */
1096 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);
1097 
1098 	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);
1099 
1100 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1101 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1102 		lower_32_bits(ring->wptr));
1103 
1104 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
1105 		~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1106 
1107 	jpeg_v1_0_start(adev, 1);
1108 
1109 	return 0;
1110 }
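/* Annotation: in DPG mode the register-check pair uses mmUVD_REG_XX_MASK /
 * mmUVD_RBC_XX_IB_REG_CHECK (seg 1 + 0x026c / 0x026b per the expansions),
 * while the SPG path above uses the *_1_0 variants at 0x05ac / 0x05ab. */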
1111
1112 static int vcn_v1_0_start(struct amdgpu_device *adev)
1113 {
1114 	int r;
1115 
1116 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1117 		r = vcn_v1_0_start_dpg_mode(adev);
1118 	else
1119 		r = vcn_v1_0_start_spg_mode(adev);
1120 	return r;
1121 }
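/* Annotation: the DPG path is taken when the ASIC advertises
 * AMD_PG_SUPPORT_VCN_DPG (bit 15 of adev->pg_flags per the expansion);
 * otherwise the static power-gating bring-up above is used. */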
1122
1123 /**
1124  * vcn_v1_0_stop - stop VCN block
1125  *
1126  * @adev: amdgpu_device pointer
1127  *
1128  * stop the VCN block
1129  */
1130static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
1131{
1132 int tmp;
1133
1134 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1135
1136 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1137 UVD_LMI_STATUS__READ_CLEAN_MASK |
1138 UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1139 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1140 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
1141
1142 /* put VCPU into reset */
1143 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1144 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1145 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1146
1147 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1148 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1149 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
1150
1151 /* disable VCPU clock */
1152 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
1153 ~UVD_VCPU_CNTL__CLK_EN_MASK);
1154
1155 /* reset LMI UMC/LMI */
1156 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1157 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
1158 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
1159
1160 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
1161 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
1162 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
1163
1164 WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
1165
1166 vcn_v1_0_enable_clock_gating(adev);
1167 vcn_1_0_enable_static_power_gating(adev);
1168 return 0;
1169}
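The SPG teardown above is deliberately ordered: wait for UVD_STATUS to report idle, drain the LMI memory interface until its clean bits assert, hold the VCPU in soft reset, gate the VCPU clock, reset the LMI/UMC paths, clear UVD_STATUS, and only then re-enable clock gating and static power gating.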
1170
1171static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
1172{
1173 uint32_t tmp;
1174
1175 /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
1176 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1177 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1178 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1179
1180 /* wait for read ptr to be equal to write ptr */
1181 tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1182 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1183
1184 tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1185 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
1186
1187 tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
1188 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF);
1189
1190 tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1191 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
1192
1193 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1194 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1195 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1196
1197 /* disable dynamic power gating mode */
1198 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
1199 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1200
1201 return 0;
1202}
1203
1204static int vcn_v1_0_stop(struct amdgpu_device *adev)
1205{
1206 int r;
1207
1208 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1209 r = vcn_v1_0_stop_dpg_mode(adev);
1210 else
1211 r = vcn_v1_0_stop_spg_mode(adev);
1212
1213 return r;
1214}
1215
1216static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
1217 int inst_idx, struct dpg_pause_state *new_state)
1218{
1219 int ret_code;
1220 uint32_t reg_data = 0;
1221 uint32_t reg_data2 = 0;
1222 struct amdgpu_ring *ring;
1223
1224 /* pause/unpause if state is changed */
1225 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1226 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
1227 adev->vcn.inst[inst_idx].pause_state.fw_based,
1228 adev->vcn.inst[inst_idx].pause_state.jpeg,
1229 new_state->fw_based, new_state->jpeg);
1230
1231 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
1232 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1233
1234 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1235 ret_code = 0;
1236
1237 if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
1238 ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1239 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1240 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1241
1242 if (!ret_code) {
1243 /* pause DPG non-jpeg */
1244 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1245 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1246 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
1247 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1248 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1249
1250 /* Restore */
1251 ring = &adev->vcn.inst->ring_enc[0];
1252 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
1253 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1254 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
1255 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1256 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1257
1258 ring = &adev->vcn.inst->ring_enc[1];
1259 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1260 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1261 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
1262 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1263 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1264
1265 ring = &adev->vcn.inst->ring_dec;
1266 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1267 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
1268 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1269 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
1270 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1271 }
1272 } else {
1273 /* unpause dpg non-jpeg, no need to wait */
1274 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1275 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1276 }
1277 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1278 }
1279
1280 /* pause/unpause if state is changed */
1281 if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
1282 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
1283 adev->vcn.inst[inst_idx].pause_state.fw_based,
1284 adev->vcn.inst[inst_idx].pause_state.jpeg,
1285 new_state->fw_based, new_state->jpeg);
1286
1287 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
1288 (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
1289
1290 if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
1291 ret_code = 0;
1292
1293 if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
1294 ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1295 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
1296 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1297
1298 if (!ret_code) {
1299 /* Make sure JPEG Snoop is disabled before sending the pause */
1300 reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
1301 reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
1302 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
1303
1304 /* pause DPG jpeg */
1305 reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
1306 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1307 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
1308 UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
1309 UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
1310
1311 /* Restore */
1312 ring = &adev->jpeg.inst->ring_dec;
1313 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
1314 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
1315 UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
1316 UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
1317 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
1318 lower_32_bits(ring->gpu_addr));
1319 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
1320 upper_32_bits(ring->gpu_addr));
1321 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
1322 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
1323 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
1324 UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
1325
1326 ring = &adev->vcn.inst->ring_dec;
Value stored to 'ring' is never read
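This is the dead store the report was generated for: after the re-assignment at line 1326, 'ring' is never dereferenced again on any path, since the register accesses that follow go through 'adev' only and the function then returns. A minimal cleanup sketch (assuming the assignment is not kept deliberately to mirror the same pattern in the non-jpeg branch at line 1265) is to drop the assignment and keep the register write:

    WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
        RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);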
1327 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
1328 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
1329 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
1330 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
1331 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1332 }
1333 } else {
1334 /* unpause dpg jpeg, no need to wait */
1335 reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
1336 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
1337 }
1338 adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
1339 }
1340
1341 return 0;
1342}
1343
1344static bool vcn_v1_0_is_idle(void *handle)
1345{
1346 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1347
1348 return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
1349}
1350
1351static int vcn_v1_0_wait_for_idle(void *handle)
1352{
1353 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1354 int ret;
1355
1356 ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
1357 UVD_STATUS__IDLE);
1358
1359 return ret;
1360}
1361
1362static int vcn_v1_0_set_clockgating_state(void *handle,
1363 enum amd_clockgating_state state)
1364{
1365 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1366 bool enable = (state == AMD_CG_STATE_GATE);
1367
1368 if (enable) {
1369 /* wait for STATUS to clear */
1370 if (!vcn_v1_0_is_idle(handle))
1371 return -EBUSY;
1372 vcn_v1_0_enable_clock_gating(adev);
1373 } else {
1374 /* disable HW gating and enable SW gating */
1375 vcn_v1_0_disable_clock_gating(adev);
1376 }
1377 return 0;
1378}
1379
1380/**
1381 * vcn_v1_0_dec_ring_get_rptr - get read pointer
1382 *
1383 * @ring: amdgpu_ring pointer
1384 *
1385 * Returns the current hardware read pointer
1386 */
1387static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
1388{
1389 struct amdgpu_device *adev = ring->adev;
1390
1391 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1392}
1393
1394/**
1395 * vcn_v1_0_dec_ring_get_wptr - get write pointer
1396 *
1397 * @ring: amdgpu_ring pointer
1398 *
1399 * Returns the current hardware write pointer
1400 */
1401static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
1402{
1403 struct amdgpu_device *adev = ring->adev;
1404
1405 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
1406}
1407
1408/**
1409 * vcn_v1_0_dec_ring_set_wptr - set write pointer
1410 *
1411 * @ring: amdgpu_ring pointer
1412 *
1413 * Commits the write pointer to the hardware
1414 */
1415static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
1416{
1417 struct amdgpu_device *adev = ring->adev;
1418
1419 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1420 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
1421 lower_32_bits(ring->wptr) | 0x80000000)
;
1422
1423 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1424}
1425
1426/**
1427 * vcn_v1_0_dec_ring_insert_start - insert a start command
1428 *
1429 * @ring: amdgpu_ring pointer
1430 *
1431 * Write a start command to the ring.
1432 */
1433static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
1434{
1435 struct amdgpu_device *adev = ring->adev;
1436
1437 amdgpu_ring_write(ring,
1438 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1439 amdgpu_ring_write(ring, 0);
1440 amdgpu_ring_write(ring,
1441 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1442 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
1443}
1444
1445/**
1446 * vcn_v1_0_dec_ring_insert_end - insert an end command
1447 *
1448 * @ring: amdgpu_ring pointer
1449 *
1450 * Write an end command to the ring.
1451 */
1452static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
1453{
1454 struct amdgpu_device *adev = ring->adev;
1455
1456 amdgpu_ring_write(ring,
1457 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1458 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
1459}
1460
1461/**
1462 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
1463 *
1464 * @ring: amdgpu_ring pointer
1465 * @fence: fence to emit
1466 *
1467 * Write a fence and a trap command to the ring.
1468 */
1469static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1470 unsigned flags)
1471{
1472 struct amdgpu_device *adev = ring->adev;
1473
1474 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1475
1476 amdgpu_ring_write(ring,
1477 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
1478 amdgpu_ring_write(ring, seq);
1479 amdgpu_ring_write(ring,
1480 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1481 amdgpu_ring_write(ring, addr & 0xffffffff);
1482 amdgpu_ring_write(ring,
1483 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1484 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1485 amdgpu_ring_write(ring,
1486 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1487 amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
1488
1489 amdgpu_ring_write(ring,
1490 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1491 amdgpu_ring_write(ring, 0);
1492 amdgpu_ring_write(ring,
1493 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1494 amdgpu_ring_write(ring, 0);
1495 amdgpu_ring_write(ring,
1496 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1497 amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
1498}
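Note the wire format here: the decode ring has no dedicated fence packet. The sequence number is written to mmUVD_CONTEXT_ID, the low 32 bits of the fence address and bits 39:32 go through mmUVD_GPCOM_VCPU_DATA0/DATA1, and VCN_DEC_CMD_FENCE followed by VCN_DEC_CMD_TRAP asks the VCPU firmware to store the fence and raise the trap interrupt that vcn_v1_0_process_interrupt() later routes to amdgpu_fence_process().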
1499
1500/**
1501 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
1502 *
1503 * @ring: amdgpu_ring pointer
1504 * @ib: indirect buffer to execute
1505 *
1506 * Write ring commands to execute the indirect buffer
1507 */
1508static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
1509 struct amdgpu_job *job,
1510 struct amdgpu_ib *ib,
1511 uint32_t flags)
1512{
1513 struct amdgpu_device *adev = ring->adev;
1514 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1515
1516 amdgpu_ring_write(ring,
1517 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
1518 amdgpu_ring_write(ring, vmid);
1519
1520 amdgpu_ring_write(ring,
1521 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1522 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1523 amdgpu_ring_write(ring,
1524 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1525 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1526 amdgpu_ring_write(ring,
1527 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
1528 amdgpu_ring_write(ring, ib->length_dw);
1529}
1530
1531static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
1532 uint32_t reg, uint32_t val,
1533 uint32_t mask)
1534{
1535 struct amdgpu_device *adev = ring->adev;
1536
1537 amdgpu_ring_write(ring,
1538 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1539 amdgpu_ring_write(ring, reg << 2);
1540 amdgpu_ring_write(ring,
1541 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1542 amdgpu_ring_write(ring, val);
1543 amdgpu_ring_write(ring,
1544 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
1545 amdgpu_ring_write(ring, mask);
1546 amdgpu_ring_write(ring,
1547 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1548 amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
1549}
1550
1551static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
1552 unsigned vmid, uint64_t pd_addr)
1553{
1554 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1555 uint32_t data0, data1, mask;
1556
1557 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1558
1559 /* wait for register write */
1560 data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1561 data1 = lower_32_bits(pd_addr);
1562 mask = 0xffffffff;
1563 vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
1564}
1565
1566static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
1567 uint32_t reg, uint32_t val)
1568{
1569 struct amdgpu_device *adev = ring->adev;
1570
1571 amdgpu_ring_write(ring,
1572 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
1573 amdgpu_ring_write(ring, reg << 2);
1574 amdgpu_ring_write(ring,
1575 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
1576 amdgpu_ring_write(ring, val);
1577 amdgpu_ring_write(ring,
1578 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
1579 amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
1580}
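The contrast with the encode ring below is worth keeping in mind: the decode ring tunnels register writes through the GPCOM mailbox (DATA0 carries reg << 2, DATA1 the value, then VCN_DEC_CMD_WRITE_REG), while the encode ring has native VCN_ENC_CMD_REG_WRITE and VCN_ENC_CMD_REG_WAIT packets for the same operations.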
1581
1582/**
1583 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
1584 *
1585 * @ring: amdgpu_ring pointer
1586 *
1587 * Returns the current hardware enc read pointer
1588 */
1589static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
1590{
1591 struct amdgpu_device *adev = ring->adev;
1592
1593 if (ring == &adev->vcn.inst->ring_enc[0])
1594 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
1595 else
1596 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
1597}
1598
1599 /**
1600 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
1601 *
1602 * @ring: amdgpu_ring pointer
1603 *
1604 * Returns the current hardware enc write pointer
1605 */
1606static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
1607{
1608 struct amdgpu_device *adev = ring->adev;
1609
1610 if (ring == &adev->vcn.inst->ring_enc[0])
1611 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
1612 else
1613 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
1614}
1615
1616 /**
1617 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
1618 *
1619 * @ring: amdgpu_ring pointer
1620 *
1621 * Commits the enc write pointer to the hardware
1622 */
1623static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
1624{
1625 struct amdgpu_device *adev = ring->adev;
1626
1627 if (ring == &adev->vcn.inst->ring_enc[0])
1628 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
1629 lower_32_bits(ring->wptr));
1630 else
1631 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
1632 lower_32_bits(ring->wptr));
1633}
1634
1635/**
1636 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
1637 *
1638 * @ring: amdgpu_ring pointer
1639 * @fence: fence to emit
1640 *
1641 * Write an enc fence and a trap command to the ring.
1642 */
1643static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1644 u64 seq, unsigned flags)
1645{
1646 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1647
1648 amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
1649 amdgpu_ring_write(ring, addr);
1650 amdgpu_ring_write(ring, upper_32_bits(addr));
1651 amdgpu_ring_write(ring, seq);
1652 amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
1653}
1654
1655static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1656{
1657 amdgpu_ring_write(ring, VCN_ENC_CMD_END);
1658}
1659
1660/**
1661 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
1662 *
1663 * @ring: amdgpu_ring pointer
1664 * @ib: indirect buffer to execute
1665 *
1666 * Write enc ring commands to execute the indirect buffer
1667 */
1668static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1669 struct amdgpu_job *job,
1670 struct amdgpu_ib *ib,
1671 uint32_t flags)
1672{
1673 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1674
1675 amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
1676 amdgpu_ring_write(ring, vmid);
1677 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1678 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1679 amdgpu_ring_write(ring, ib->length_dw);
1680}
1681
1682static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1683 uint32_t reg, uint32_t val,
1684 uint32_t mask)
1685{
1686 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
1687 amdgpu_ring_write(ring, reg << 2);
1688 amdgpu_ring_write(ring, mask);
1689 amdgpu_ring_write(ring, val);
1690}
1691
1692static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1693 unsigned int vmid, uint64_t pd_addr)
1694{
1695 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1696
1697 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1698
1699 /* wait for reg writes */
1700 vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1701 vmid * hub->ctx_addr_distance,
1702 lower_32_bits(pd_addr), 0xffffffff);
1703}
1704
1705static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1706 uint32_t reg, uint32_t val)
1707{
1708 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
1709 amdgpu_ring_write(ring, reg << 2);
1710 amdgpu_ring_write(ring, val);
1711}
1712
1713static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
1714 struct amdgpu_irq_src *source,
1715 unsigned type,
1716 enum amdgpu_interrupt_state state)
1717{
1718 return 0;
1719}
1720
1721static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
1722 struct amdgpu_irq_src *source,
1723 struct amdgpu_iv_entry *entry)
1724{
1725 DRM_DEBUG("IH: VCN TRAP\n")__drm_dbg(DRM_UT_CORE, "IH: VCN TRAP\n");
1726
1727 switch (entry->src_id) {
1728 case 124:
1729 amdgpu_fence_process(&adev->vcn.inst->ring_dec);
1730 break;
1731 case 119:
1732 amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
1733 break;
1734 case 120:
1735 amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
1736 break;
1737 default:
1738 DRM_ERROR("Unhandled interrupt: %d %d\n",__drm_err("Unhandled interrupt: %d %d\n", entry->src_id, entry
->src_data[0])
1739 entry->src_id, entry->src_data[0])__drm_err("Unhandled interrupt: %d %d\n", entry->src_id, entry
->src_data[0])
;
1740 break;
1741 }
1742
1743 return 0;
1744}
1745
1746static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1747{
1748 struct amdgpu_device *adev = ring->adev;
1749 int i;
1750
1751 WARN_ON(ring->wptr % 2 || count % 2);
1752
1753 for (i = 0; i < count / 2; i++) {
1754 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
1755 amdgpu_ring_write(ring, 0);
1756 }
1757}
1758
1759static int vcn_v1_0_set_powergating_state(void *handle,
1760 enum amd_powergating_state state)
1761{
1762 /* This doesn't actually powergate the VCN block.
1763 * That's done in the dpm code via the SMC. This
1764 * just re-inits the block as necessary. The actual
1765 * gating still happens in the dpm code. We should
1766 * revisit this when there is a cleaner line between
* the SMC and the hw blocks.
1768 */
1769 int ret;
1770 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1771
1772 if (state == adev->vcn.cur_state)
1773 return 0;
1774
1775 if (state == AMD_PG_STATE_GATE)
1776 ret = vcn_v1_0_stop(adev);
1777 else
1778 ret = vcn_v1_0_start(adev);
1779
1780 if (!ret)
1781 adev->vcn.cur_state = state;
1782 return ret;
1783}
1784
1785static void vcn_v1_0_idle_work_handler(struct work_struct *work)
1786{
1787 struct amdgpu_device *adev =
1788 container_of(work, struct amdgpu_device, vcn.idle_work.work);
1789 unsigned int fences = 0, i;
1790
1791 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
1792 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
1793
1794 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1795 struct dpg_pause_state new_state;
1796
1797 if (fences)
1798 new_state.fw_based = VCN_DPG_STATE__PAUSE;
1799 else
1800 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
1801
1802 if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
1803 new_state.jpeg = VCN_DPG_STATE__PAUSE;
1804 else
1805 new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
1806
1807 adev->vcn.pause_dpg_mode(adev, 0, &new_state);
1808 }
1809
1810 fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
1811 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
1812
1813 if (fences == 0) {
1814 amdgpu_gfx_off_ctrl(adev, true);
1815 if (adev->pm.dpm_enabled)
1816 amdgpu_dpm_enable_uvd(adev, false);
1817 else
1818 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
1819 AMD_PG_STATE_GATE);
1820 } else {
1821 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
1822 }
1823}
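
The gating heuristic sums emitted-but-unsignaled fences across the encode rings, the JPEG decode ring, and the VCN decode ring; only a total of zero gates the block, otherwise the work re-arms itself after VCN_IDLE_TIMEOUT (one second, per the expansion above). A standalone sketch of the decision, with hypothetical fence counts in place of amdgpu_fence_count_emitted():

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical per-ring counts standing in for
         * amdgpu_fence_count_emitted() on each inspected ring. */
        unsigned enc0 = 0, enc1 = 1, jpeg_dec = 0, vcn_dec = 0;
        unsigned fences = enc0 + enc1 + jpeg_dec + vcn_dec;

        if (fences == 0)
            printf("idle: gate VCN (gfxoff + dpm/PG)\n");
        else
            printf("busy (%u fences): re-arm idle work\n", fences);
        return 0;
    }
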
1824
1825static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
1826{
1827 struct amdgpu_device *adev = ring->adev;
1828 bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
1829
1830 mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
1831
1832 if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
1833 DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n")__drm_err("VCN dec: jpeg dec ring may not be empty\n");
1834
1835 vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
1836
1837}
1838
1839 void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
1840{
1841 struct amdgpu_device *adev = ring->adev;
1842
1843 if (set_clocks) {
1844 amdgpu_gfx_off_ctrl(adev, false);
1845 if (adev->pm.dpm_enabled)
1846 amdgpu_dpm_enable_uvd(adev, true);
1847 else
1848 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
1849 AMD_PG_STATE_UNGATE);
1850 }
1851
1852 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1853 struct dpg_pause_state new_state;
1854 unsigned int fences = 0, i;
1855
1856 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
1857 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
1858
1859 if (fences)
1860 new_state.fw_based = VCN_DPG_STATE__PAUSE;
1861 else
1862 new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
1863
1864 if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
1865 new_state.jpeg = VCN_DPG_STATE__PAUSE;
1866 else
1867 new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
1868
1869 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
1870 new_state.fw_based = VCN_DPG_STATE__PAUSE;
1871 else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
1872 new_state.jpeg = VCN_DPG_STATE__PAUSE;
1873
1874 adev->vcn.pause_dpg_mode(adev, 0, &new_state);
1875 }
1876}
1877
1878void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
1879{
1880 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
1881 mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
1882}
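
begin_use/end_use bracket every submission: begin_use cancels the pending idle work (learning from the return value whether clocks must be re-enabled), takes the vcn1_jpeg1_workaround lock, and drains the JPEG decode ring; end_use re-arms the idle timer and drops the lock. A minimal pthread model of that bracket, assuming only the lock/unlock ordering shown above:

    #include <pthread.h>
    #include <stdio.h>

    /* Model of the begin_use/end_use bracket: the workaround lock is
     * held for the whole submission; the idle timer is re-armed only
     * on the way out. */
    static pthread_mutex_t workaround = PTHREAD_MUTEX_INITIALIZER;

    static void begin_use(void)
    {
        /* cancel_delayed_work_sync(&idle_work) would happen here */
        pthread_mutex_lock(&workaround);
        /* drain the JPEG decode ring before touching VCN */
    }

    static void end_use(void)
    {
        /* schedule_delayed_work(&idle_work, VCN_IDLE_TIMEOUT) */
        pthread_mutex_unlock(&workaround);
    }

    int main(void)
    {
        begin_use();
        printf("emit ring commands here\n");
        end_use();
        return 0;
    }
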
1883
1884static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
1885 .name = "vcn_v1_0",
1886 .early_init = vcn_v1_0_early_init,
1887 .late_init = NULL,
1888 .sw_init = vcn_v1_0_sw_init,
1889 .sw_fini = vcn_v1_0_sw_fini,
1890 .hw_init = vcn_v1_0_hw_init,
1891 .hw_fini = vcn_v1_0_hw_fini,
1892 .suspend = vcn_v1_0_suspend,
1893 .resume = vcn_v1_0_resume,
1894 .is_idle = vcn_v1_0_is_idle,
1895 .wait_for_idle = vcn_v1_0_wait_for_idle,
1896 .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
1897 .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
1898 .soft_reset = NULL /* vcn_v1_0_soft_reset */,
1899 .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
1900 .set_clockgating_state = vcn_v1_0_set_clockgating_state,
1901 .set_powergating_state = vcn_v1_0_set_powergating_state,
1902};
1903
1904static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
1905 .type = AMDGPU_RING_TYPE_VCN_DEC,
1906 .align_mask = 0xf,
1907 .support_64bit_ptrs = false,
1908 .no_user_fence = true,
1909 .vmhub = AMDGPU_MMHUB_0,
1910 .get_rptr = vcn_v1_0_dec_ring_get_rptr,
1911 .get_wptr = vcn_v1_0_dec_ring_get_wptr,
1912 .set_wptr = vcn_v1_0_dec_ring_set_wptr,
1913 .emit_frame_size =
1914 6 + 6 + /* hdp invalidate / flush */
1915 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1916 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1917 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
1918 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
1919 6,
1920 .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
1921 .emit_ib = vcn_v1_0_dec_ring_emit_ib,
1922 .emit_fence = vcn_v1_0_dec_ring_emit_fence,
1923 .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
1924 .test_ring = amdgpu_vcn_dec_ring_test_ring,
1925 .test_ib = amdgpu_vcn_dec_ring_test_ib,
1926 .insert_nop = vcn_v1_0_dec_ring_insert_nop,
1927 .insert_start = vcn_v1_0_dec_ring_insert_start,
1928 .insert_end = vcn_v1_0_dec_ring_insert_end,
1929 .pad_ib = amdgpu_ring_generic_pad_ib,
1930 .begin_use = vcn_v1_0_ring_begin_use,
1931 .end_use = vcn_v1_0_ring_end_use,
1932 .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
1933 .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
1934 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1935};
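
For the .emit_frame_size arithmetic above: with SOC15_FLUSH_GPU_TLB_NUM_WREG = 6 and SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT = 3 (the values visible in the macro expansions), the decode ring's worst-case frame reservation works out as follows:

    #include <stdio.h>

    int main(void)
    {
        /* Macro values taken from the expansions shown above. */
        const int num_wreg = 6, num_reg_wait = 3;
        int frame = 6 + 6 +              /* hdp invalidate / flush */
                    num_wreg * 6 +
                    num_reg_wait * 8 +
                    8 +                  /* emit_vm_flush */
                    14 + 14 +            /* emit_fence x2, vm fence */
                    6;

        printf("decode ring worst-case frame: %d dwords\n", frame); /* 114 */
        return 0;
    }
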
1936
1937static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
1938 .type = AMDGPU_RING_TYPE_VCN_ENC,
1939 .align_mask = 0x3f,
1940 .nop = VCN_ENC_CMD_NO_OP,
1941 .support_64bit_ptrs = false,
1942 .no_user_fence = true,
1943 .vmhub = AMDGPU_MMHUB_0,
1944 .get_rptr = vcn_v1_0_enc_ring_get_rptr,
1945 .get_wptr = vcn_v1_0_enc_ring_get_wptr,
1946 .set_wptr = vcn_v1_0_enc_ring_set_wptr,
1947 .emit_frame_size =
1948 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1949 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1950 4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
1951 5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
1952 1, /* vcn_v1_0_enc_ring_insert_end */
1953 .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
1954 .emit_ib = vcn_v1_0_enc_ring_emit_ib,
1955 .emit_fence = vcn_v1_0_enc_ring_emit_fence,
1956 .emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
1957 .test_ring = amdgpu_vcn_enc_ring_test_ring,
1958 .test_ib = amdgpu_vcn_enc_ring_test_ib,
1959 .insert_nop = amdgpu_ring_insert_nop,
1960 .insert_end = vcn_v1_0_enc_ring_insert_end,
1961 .pad_ib = amdgpu_ring_generic_pad_ib,
1962 .begin_use = vcn_v1_0_ring_begin_use,
1963 .end_use = vcn_v1_0_ring_end_use,
1964 .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
1965 .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
1966 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1967};
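
By the same macro values (6 write-regs, 3 reg-waits), the encode ring's worst case is 6*3 + 3*4 + 4 + 5 + 5 + 1 = 45 dwords per frame, plus 5 dwords per indirect buffer from .emit_ib_size.
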
1968
1969static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
1970{
1971 adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
1972 DRM_INFO("VCN decode is enabled in VM mode\n")printk("\0016" "[" "drm" "] " "VCN decode is enabled in VM mode\n"
)
;
1973}
1974
1975static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1976{
1977 int i;
1978
1979 for (i = 0; i < adev->vcn.num_enc_rings; ++i)
1980 adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;
1981
1982 DRM_INFO("VCN encode is enabled in VM mode\n")printk("\0016" "[" "drm" "] " "VCN encode is enabled in VM mode\n"
)
;
1983}
1984
1985static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
1986 .set = vcn_v1_0_set_interrupt_state,
1987 .process = vcn_v1_0_process_interrupt,
1988};
1989
1990static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
1991{
1992 adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
1993 adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
1994}
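
A quick check of the arithmetic: with num_enc_rings = 2 (matching the two encode cases in the interrupt handler above), num_types evaluates to 2 + 2 = 4 interrupt source types for this block.
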
1995
1996const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
1997{
1998 .type = AMD_IP_BLOCK_TYPE_VCN,
1999 .major = 1,
2000 .minor = 0,
2001 .rev = 0,
2002 .funcs = &vcn_v1_0_ip_funcs,
2003};
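
This exported descriptor is what the SoC-level code registers during IP block setup. A hedged sketch of the call site — amdgpu_device_ip_block_add() is the registration helper, and the CHIP_RAVEN check is an assumption about where VCN 1.0 applies (see soc15.c for the actual per-ASIC dispatch):

    /* Sketch: during IP block setup in the SoC code, e.g. soc15.c. */
    if (adev->asic_type == CHIP_RAVEN)
        amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
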