Bug Summary

File: dev/pci/drm/radeon/si.c
Warning: line 1925, column 14
Access to field 'size' results in a dereference of a null pointer (loaded from field 'smc_fw')
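
The warning flags a path through si.c on which rdev->smc_fw can still be NULL (the SMC firmware request having failed or been skipped) when its size field is read at line 1925. Below is a minimal sketch of the kind of guard that avoids the dereference; the helper name and the smc_req_size/fw_name parameters are assumptions for illustration only — the actual fix applied to the driver may look different.

/*
 * Hypothetical illustration: only rdev->smc_fw and its 'size' field come
 * from the report above; everything else here is made up for the sketch.
 */
#include <linux/firmware.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int si_check_smc_fw(const struct firmware *smc_fw,
			   size_t smc_req_size, const char *fw_name)
{
	if (!smc_fw)		/* SMC ucode was never loaded */
		return -EINVAL;	/* bail out before touching smc_fw->size */
	if (smc_fw->size != smc_req_size) {
		pr_err("si_smc: Bogus length %zu in firmware \"%s\"\n",
		       smc_fw->size, fw_name);
		return -EINVAL;
	}
	return 0;
}
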

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name si.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/radeon/si.c

/usr/src/sys/dev/pci/drm/radeon/si.c

1/*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25#include <linux/firmware.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28#include <linux/slab.h>
29
30#include <drm/drm_vblank.h>
31#include <drm/radeon_drm.h>
32
33#include "atom.h"
34#include "clearstate_si.h"
35#include "radeon.h"
36#include "radeon_asic.h"
37#include "radeon_audio.h"
38#include "radeon_ucode.h"
39#include "si_blit_shaders.h"
40#include "sid.h"
41
42
43MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
44MODULE_FIRMWARE("radeon/TAHITI_me.bin");
45MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
46MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
47MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
48MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
49MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
50
51MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
52MODULE_FIRMWARE("radeon/tahiti_me.bin");
53MODULE_FIRMWARE("radeon/tahiti_ce.bin");
54MODULE_FIRMWARE("radeon/tahiti_mc.bin");
55MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
56MODULE_FIRMWARE("radeon/tahiti_smc.bin");
57
58MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
59MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
60MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
61MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
62MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
63MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
64MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
65
66MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
67MODULE_FIRMWARE("radeon/pitcairn_me.bin");
68MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
69MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
70MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
71MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
72MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
73
74MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
75MODULE_FIRMWARE("radeon/VERDE_me.bin");
76MODULE_FIRMWARE("radeon/VERDE_ce.bin");
77MODULE_FIRMWARE("radeon/VERDE_mc.bin");
78MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
79MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
80MODULE_FIRMWARE("radeon/VERDE_smc.bin");
81
82MODULE_FIRMWARE("radeon/verde_pfp.bin");
83MODULE_FIRMWARE("radeon/verde_me.bin");
84MODULE_FIRMWARE("radeon/verde_ce.bin");
85MODULE_FIRMWARE("radeon/verde_mc.bin");
86MODULE_FIRMWARE("radeon/verde_rlc.bin");
87MODULE_FIRMWARE("radeon/verde_smc.bin");
88MODULE_FIRMWARE("radeon/verde_k_smc.bin");
89
90MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
91MODULE_FIRMWARE("radeon/OLAND_me.bin");
92MODULE_FIRMWARE("radeon/OLAND_ce.bin");
93MODULE_FIRMWARE("radeon/OLAND_mc.bin");
94MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
95MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
96MODULE_FIRMWARE("radeon/OLAND_smc.bin");
97
98MODULE_FIRMWARE("radeon/oland_pfp.bin");
99MODULE_FIRMWARE("radeon/oland_me.bin");
100MODULE_FIRMWARE("radeon/oland_ce.bin");
101MODULE_FIRMWARE("radeon/oland_mc.bin");
102MODULE_FIRMWARE("radeon/oland_rlc.bin");
103MODULE_FIRMWARE("radeon/oland_smc.bin");
104MODULE_FIRMWARE("radeon/oland_k_smc.bin");
105
106MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
107MODULE_FIRMWARE("radeon/HAINAN_me.bin");
108MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
109MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
110MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
111MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
112MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
113
114MODULE_FIRMWARE("radeon/hainan_pfp.bin");
115MODULE_FIRMWARE("radeon/hainan_me.bin");
116MODULE_FIRMWARE("radeon/hainan_ce.bin");
117MODULE_FIRMWARE("radeon/hainan_mc.bin");
118MODULE_FIRMWARE("radeon/hainan_rlc.bin");
119MODULE_FIRMWARE("radeon/hainan_smc.bin");
120MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
121MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
122
123MODULE_FIRMWARE("radeon/si58_mc.bin");
124
125static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
126static void si_pcie_gen3_enable(struct radeon_device *rdev);
127static void si_program_aspm(struct radeon_device *rdev);
128extern void sumo_rlc_fini(struct radeon_device *rdev);
129extern int sumo_rlc_init(struct radeon_device *rdev);
130extern int r600_ih_ring_alloc(struct radeon_device *rdev);
131extern void r600_ih_ring_fini(struct radeon_device *rdev);
132extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
133extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
134extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
135extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
136extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
137extern bool evergreen_is_display_hung(struct radeon_device *rdev);
138static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
139 bool enable);
140static void si_init_pg(struct radeon_device *rdev);
141static void si_init_cg(struct radeon_device *rdev);
142static void si_fini_pg(struct radeon_device *rdev);
143static void si_fini_cg(struct radeon_device *rdev);
144static void si_rlc_stop(struct radeon_device *rdev);
145
146static const u32 crtc_offsets[] =
147{
148 EVERGREEN_CRTC0_REGISTER_OFFSET,
149 EVERGREEN_CRTC1_REGISTER_OFFSET,
150 EVERGREEN_CRTC2_REGISTER_OFFSET,
151 EVERGREEN_CRTC3_REGISTER_OFFSET,
152 EVERGREEN_CRTC4_REGISTER_OFFSET,
153 EVERGREEN_CRTC5_REGISTER_OFFSET
154};
155
156static const u32 si_disp_int_status[] =
157{
158 DISP_INTERRUPT_STATUS,
159 DISP_INTERRUPT_STATUS_CONTINUE,
160 DISP_INTERRUPT_STATUS_CONTINUE2,
161 DISP_INTERRUPT_STATUS_CONTINUE3,
162 DISP_INTERRUPT_STATUS_CONTINUE4,
163 DISP_INTERRUPT_STATUS_CONTINUE5
164};
165
166#define DC_HPDx_CONTROL(x) (DC_HPD1_CONTROL + (x * 0xc))
167#define DC_HPDx_INT_CONTROL(x) (DC_HPD1_INT_CONTROL + (x * 0xc))
168#define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS + (x * 0xc))
169
170static const u32 verde_rlc_save_restore_register_list[] =
171{
172 (0x8000 << 16) | (0x98f4 >> 2),
173 0x00000000,
174 (0x8040 << 16) | (0x98f4 >> 2),
175 0x00000000,
176 (0x8000 << 16) | (0xe80 >> 2),
177 0x00000000,
178 (0x8040 << 16) | (0xe80 >> 2),
179 0x00000000,
180 (0x8000 << 16) | (0x89bc >> 2),
181 0x00000000,
182 (0x8040 << 16) | (0x89bc >> 2),
183 0x00000000,
184 (0x8000 << 16) | (0x8c1c >> 2),
185 0x00000000,
186 (0x8040 << 16) | (0x8c1c >> 2),
187 0x00000000,
188 (0x9c00 << 16) | (0x98f0 >> 2),
189 0x00000000,
190 (0x9c00 << 16) | (0xe7c >> 2),
191 0x00000000,
192 (0x8000 << 16) | (0x9148 >> 2),
193 0x00000000,
194 (0x8040 << 16) | (0x9148 >> 2),
195 0x00000000,
196 (0x9c00 << 16) | (0x9150 >> 2),
197 0x00000000,
198 (0x9c00 << 16) | (0x897c >> 2),
199 0x00000000,
200 (0x9c00 << 16) | (0x8d8c >> 2),
201 0x00000000,
202 (0x9c00 << 16) | (0xac54 >> 2),
203 0X00000000,
204 0x3,
205 (0x9c00 << 16) | (0x98f8 >> 2),
206 0x00000000,
207 (0x9c00 << 16) | (0x9910 >> 2),
208 0x00000000,
209 (0x9c00 << 16) | (0x9914 >> 2),
210 0x00000000,
211 (0x9c00 << 16) | (0x9918 >> 2),
212 0x00000000,
213 (0x9c00 << 16) | (0x991c >> 2),
214 0x00000000,
215 (0x9c00 << 16) | (0x9920 >> 2),
216 0x00000000,
217 (0x9c00 << 16) | (0x9924 >> 2),
218 0x00000000,
219 (0x9c00 << 16) | (0x9928 >> 2),
220 0x00000000,
221 (0x9c00 << 16) | (0x992c >> 2),
222 0x00000000,
223 (0x9c00 << 16) | (0x9930 >> 2),
224 0x00000000,
225 (0x9c00 << 16) | (0x9934 >> 2),
226 0x00000000,
227 (0x9c00 << 16) | (0x9938 >> 2),
228 0x00000000,
229 (0x9c00 << 16) | (0x993c >> 2),
230 0x00000000,
231 (0x9c00 << 16) | (0x9940 >> 2),
232 0x00000000,
233 (0x9c00 << 16) | (0x9944 >> 2),
234 0x00000000,
235 (0x9c00 << 16) | (0x9948 >> 2),
236 0x00000000,
237 (0x9c00 << 16) | (0x994c >> 2),
238 0x00000000,
239 (0x9c00 << 16) | (0x9950 >> 2),
240 0x00000000,
241 (0x9c00 << 16) | (0x9954 >> 2),
242 0x00000000,
243 (0x9c00 << 16) | (0x9958 >> 2),
244 0x00000000,
245 (0x9c00 << 16) | (0x995c >> 2),
246 0x00000000,
247 (0x9c00 << 16) | (0x9960 >> 2),
248 0x00000000,
249 (0x9c00 << 16) | (0x9964 >> 2),
250 0x00000000,
251 (0x9c00 << 16) | (0x9968 >> 2),
252 0x00000000,
253 (0x9c00 << 16) | (0x996c >> 2),
254 0x00000000,
255 (0x9c00 << 16) | (0x9970 >> 2),
256 0x00000000,
257 (0x9c00 << 16) | (0x9974 >> 2),
258 0x00000000,
259 (0x9c00 << 16) | (0x9978 >> 2),
260 0x00000000,
261 (0x9c00 << 16) | (0x997c >> 2),
262 0x00000000,
263 (0x9c00 << 16) | (0x9980 >> 2),
264 0x00000000,
265 (0x9c00 << 16) | (0x9984 >> 2),
266 0x00000000,
267 (0x9c00 << 16) | (0x9988 >> 2),
268 0x00000000,
269 (0x9c00 << 16) | (0x998c >> 2),
270 0x00000000,
271 (0x9c00 << 16) | (0x8c00 >> 2),
272 0x00000000,
273 (0x9c00 << 16) | (0x8c14 >> 2),
274 0x00000000,
275 (0x9c00 << 16) | (0x8c04 >> 2),
276 0x00000000,
277 (0x9c00 << 16) | (0x8c08 >> 2),
278 0x00000000,
279 (0x8000 << 16) | (0x9b7c >> 2),
280 0x00000000,
281 (0x8040 << 16) | (0x9b7c >> 2),
282 0x00000000,
283 (0x8000 << 16) | (0xe84 >> 2),
284 0x00000000,
285 (0x8040 << 16) | (0xe84 >> 2),
286 0x00000000,
287 (0x8000 << 16) | (0x89c0 >> 2),
288 0x00000000,
289 (0x8040 << 16) | (0x89c0 >> 2),
290 0x00000000,
291 (0x8000 << 16) | (0x914c >> 2),
292 0x00000000,
293 (0x8040 << 16) | (0x914c >> 2),
294 0x00000000,
295 (0x8000 << 16) | (0x8c20 >> 2),
296 0x00000000,
297 (0x8040 << 16) | (0x8c20 >> 2),
298 0x00000000,
299 (0x8000 << 16) | (0x9354 >> 2),
300 0x00000000,
301 (0x8040 << 16) | (0x9354 >> 2),
302 0x00000000,
303 (0x9c00 << 16) | (0x9060 >> 2),
304 0x00000000,
305 (0x9c00 << 16) | (0x9364 >> 2),
306 0x00000000,
307 (0x9c00 << 16) | (0x9100 >> 2),
308 0x00000000,
309 (0x9c00 << 16) | (0x913c >> 2),
310 0x00000000,
311 (0x8000 << 16) | (0x90e0 >> 2),
312 0x00000000,
313 (0x8000 << 16) | (0x90e4 >> 2),
314 0x00000000,
315 (0x8000 << 16) | (0x90e8 >> 2),
316 0x00000000,
317 (0x8040 << 16) | (0x90e0 >> 2),
318 0x00000000,
319 (0x8040 << 16) | (0x90e4 >> 2),
320 0x00000000,
321 (0x8040 << 16) | (0x90e8 >> 2),
322 0x00000000,
323 (0x9c00 << 16) | (0x8bcc >> 2),
324 0x00000000,
325 (0x9c00 << 16) | (0x8b24 >> 2),
326 0x00000000,
327 (0x9c00 << 16) | (0x88c4 >> 2),
328 0x00000000,
329 (0x9c00 << 16) | (0x8e50 >> 2),
330 0x00000000,
331 (0x9c00 << 16) | (0x8c0c >> 2),
332 0x00000000,
333 (0x9c00 << 16) | (0x8e58 >> 2),
334 0x00000000,
335 (0x9c00 << 16) | (0x8e5c >> 2),
336 0x00000000,
337 (0x9c00 << 16) | (0x9508 >> 2),
338 0x00000000,
339 (0x9c00 << 16) | (0x950c >> 2),
340 0x00000000,
341 (0x9c00 << 16) | (0x9494 >> 2),
342 0x00000000,
343 (0x9c00 << 16) | (0xac0c >> 2),
344 0x00000000,
345 (0x9c00 << 16) | (0xac10 >> 2),
346 0x00000000,
347 (0x9c00 << 16) | (0xac14 >> 2),
348 0x00000000,
349 (0x9c00 << 16) | (0xae00 >> 2),
350 0x00000000,
351 (0x9c00 << 16) | (0xac08 >> 2),
352 0x00000000,
353 (0x9c00 << 16) | (0x88d4 >> 2),
354 0x00000000,
355 (0x9c00 << 16) | (0x88c8 >> 2),
356 0x00000000,
357 (0x9c00 << 16) | (0x88cc >> 2),
358 0x00000000,
359 (0x9c00 << 16) | (0x89b0 >> 2),
360 0x00000000,
361 (0x9c00 << 16) | (0x8b10 >> 2),
362 0x00000000,
363 (0x9c00 << 16) | (0x8a14 >> 2),
364 0x00000000,
365 (0x9c00 << 16) | (0x9830 >> 2),
366 0x00000000,
367 (0x9c00 << 16) | (0x9834 >> 2),
368 0x00000000,
369 (0x9c00 << 16) | (0x9838 >> 2),
370 0x00000000,
371 (0x9c00 << 16) | (0x9a10 >> 2),
372 0x00000000,
373 (0x8000 << 16) | (0x9870 >> 2),
374 0x00000000,
375 (0x8000 << 16) | (0x9874 >> 2),
376 0x00000000,
377 (0x8001 << 16) | (0x9870 >> 2),
378 0x00000000,
379 (0x8001 << 16) | (0x9874 >> 2),
380 0x00000000,
381 (0x8040 << 16) | (0x9870 >> 2),
382 0x00000000,
383 (0x8040 << 16) | (0x9874 >> 2),
384 0x00000000,
385 (0x8041 << 16) | (0x9870 >> 2),
386 0x00000000,
387 (0x8041 << 16) | (0x9874 >> 2),
388 0x00000000,
389 0x00000000
390};
391
392static const u32 tahiti_golden_rlc_registers[] =
393{
394 0xc424, 0xffffffff, 0x00601005,
395 0xc47c, 0xffffffff, 0x10104040,
396 0xc488, 0xffffffff, 0x0100000a,
397 0xc314, 0xffffffff, 0x00000800,
398 0xc30c, 0xffffffff, 0x800000f4,
399 0xf4a8, 0xffffffff, 0x00000000
400};
401
402static const u32 tahiti_golden_registers[] =
403{
404 0x9a10, 0x00010000, 0x00018208,
405 0x9830, 0xffffffff, 0x00000000,
406 0x9834, 0xf00fffff, 0x00000400,
407 0x9838, 0x0002021c, 0x00020200,
408 0xc78, 0x00000080, 0x00000000,
409 0xd030, 0x000300c0, 0x00800040,
410 0xd830, 0x000300c0, 0x00800040,
411 0x5bb0, 0x000000f0, 0x00000070,
412 0x5bc0, 0x00200000, 0x50100000,
413 0x7030, 0x31000311, 0x00000011,
414 0x277c, 0x00000003, 0x000007ff,
415 0x240c, 0x000007ff, 0x00000000,
416 0x8a14, 0xf000001f, 0x00000007,
417 0x8b24, 0xffffffff, 0x00ffffff,
418 0x8b10, 0x0000ff0f, 0x00000000,
419 0x28a4c, 0x07ffffff, 0x4e000000,
420 0x28350, 0x3f3f3fff, 0x2a00126a,
421 0x30, 0x000000ff, 0x0040,
422 0x34, 0x00000040, 0x00004040,
423 0x9100, 0x07ffffff, 0x03000000,
424 0x8e88, 0x01ff1f3f, 0x00000000,
425 0x8e84, 0x01ff1f3f, 0x00000000,
426 0x9060, 0x0000007f, 0x00000020,
427 0x9508, 0x00010000, 0x00010000,
428 0xac14, 0x00000200, 0x000002fb,
429 0xac10, 0xffffffff, 0x0000543b,
430 0xac0c, 0xffffffff, 0xa9210876,
431 0x88d0, 0xffffffff, 0x000fff40,
432 0x88d4, 0x0000001f, 0x00000010,
433 0x1410, 0x20000000, 0x20fffed8,
434 0x15c0, 0x000c0fc0, 0x000c0400
435};
436
437static const u32 tahiti_golden_registers2[] =
438{
439 0xc64, 0x00000001, 0x00000001
440};
441
442static const u32 pitcairn_golden_rlc_registers[] =
443{
444 0xc424, 0xffffffff, 0x00601004,
445 0xc47c, 0xffffffff, 0x10102020,
446 0xc488, 0xffffffff, 0x01000020,
447 0xc314, 0xffffffff, 0x00000800,
448 0xc30c, 0xffffffff, 0x800000a4
449};
450
451static const u32 pitcairn_golden_registers[] =
452{
453 0x9a10, 0x00010000, 0x00018208,
454 0x9830, 0xffffffff, 0x00000000,
455 0x9834, 0xf00fffff, 0x00000400,
456 0x9838, 0x0002021c, 0x00020200,
457 0xc78, 0x00000080, 0x00000000,
458 0xd030, 0x000300c0, 0x00800040,
459 0xd830, 0x000300c0, 0x00800040,
460 0x5bb0, 0x000000f0, 0x00000070,
461 0x5bc0, 0x00200000, 0x50100000,
462 0x7030, 0x31000311, 0x00000011,
463 0x2ae4, 0x00073ffe, 0x000022a2,
464 0x240c, 0x000007ff, 0x00000000,
465 0x8a14, 0xf000001f, 0x00000007,
466 0x8b24, 0xffffffff, 0x00ffffff,
467 0x8b10, 0x0000ff0f, 0x00000000,
468 0x28a4c, 0x07ffffff, 0x4e000000,
469 0x28350, 0x3f3f3fff, 0x2a00126a,
470 0x30, 0x000000ff, 0x0040,
471 0x34, 0x00000040, 0x00004040,
472 0x9100, 0x07ffffff, 0x03000000,
473 0x9060, 0x0000007f, 0x00000020,
474 0x9508, 0x00010000, 0x00010000,
475 0xac14, 0x000003ff, 0x000000f7,
476 0xac10, 0xffffffff, 0x00000000,
477 0xac0c, 0xffffffff, 0x32761054,
478 0x88d4, 0x0000001f, 0x00000010,
479 0x15c0, 0x000c0fc0, 0x000c0400
480};
481
482static const u32 verde_golden_rlc_registers[] =
483{
484 0xc424, 0xffffffff, 0x033f1005,
485 0xc47c, 0xffffffff, 0x10808020,
486 0xc488, 0xffffffff, 0x00800008,
487 0xc314, 0xffffffff, 0x00001000,
488 0xc30c, 0xffffffff, 0x80010014
489};
490
491static const u32 verde_golden_registers[] =
492{
493 0x9a10, 0x00010000, 0x00018208,
494 0x9830, 0xffffffff, 0x00000000,
495 0x9834, 0xf00fffff, 0x00000400,
496 0x9838, 0x0002021c, 0x00020200,
497 0xc78, 0x00000080, 0x00000000,
498 0xd030, 0x000300c0, 0x00800040,
499 0xd030, 0x000300c0, 0x00800040,
500 0xd830, 0x000300c0, 0x00800040,
501 0xd830, 0x000300c0, 0x00800040,
502 0x5bb0, 0x000000f0, 0x00000070,
503 0x5bc0, 0x00200000, 0x50100000,
504 0x7030, 0x31000311, 0x00000011,
505 0x2ae4, 0x00073ffe, 0x000022a2,
506 0x2ae4, 0x00073ffe, 0x000022a2,
507 0x2ae4, 0x00073ffe, 0x000022a2,
508 0x240c, 0x000007ff, 0x00000000,
509 0x240c, 0x000007ff, 0x00000000,
510 0x240c, 0x000007ff, 0x00000000,
511 0x8a14, 0xf000001f, 0x00000007,
512 0x8a14, 0xf000001f, 0x00000007,
513 0x8a14, 0xf000001f, 0x00000007,
514 0x8b24, 0xffffffff, 0x00ffffff,
515 0x8b10, 0x0000ff0f, 0x00000000,
516 0x28a4c, 0x07ffffff, 0x4e000000,
517 0x28350, 0x3f3f3fff, 0x0000124a,
518 0x28350, 0x3f3f3fff, 0x0000124a,
519 0x28350, 0x3f3f3fff, 0x0000124a,
520 0x30, 0x000000ff, 0x0040,
521 0x34, 0x00000040, 0x00004040,
522 0x9100, 0x07ffffff, 0x03000000,
523 0x9100, 0x07ffffff, 0x03000000,
524 0x8e88, 0x01ff1f3f, 0x00000000,
525 0x8e88, 0x01ff1f3f, 0x00000000,
526 0x8e88, 0x01ff1f3f, 0x00000000,
527 0x8e84, 0x01ff1f3f, 0x00000000,
528 0x8e84, 0x01ff1f3f, 0x00000000,
529 0x8e84, 0x01ff1f3f, 0x00000000,
530 0x9060, 0x0000007f, 0x00000020,
531 0x9508, 0x00010000, 0x00010000,
532 0xac14, 0x000003ff, 0x00000003,
533 0xac14, 0x000003ff, 0x00000003,
534 0xac14, 0x000003ff, 0x00000003,
535 0xac10, 0xffffffff, 0x00000000,
536 0xac10, 0xffffffff, 0x00000000,
537 0xac10, 0xffffffff, 0x00000000,
538 0xac0c, 0xffffffff, 0x00001032,
539 0xac0c, 0xffffffff, 0x00001032,
540 0xac0c, 0xffffffff, 0x00001032,
541 0x88d4, 0x0000001f, 0x00000010,
542 0x88d4, 0x0000001f, 0x00000010,
543 0x88d4, 0x0000001f, 0x00000010,
544 0x15c0, 0x000c0fc0, 0x000c0400
545};
546
547static const u32 oland_golden_rlc_registers[] =
548{
549 0xc424, 0xffffffff, 0x00601005,
550 0xc47c, 0xffffffff, 0x10104040,
551 0xc488, 0xffffffff, 0x0100000a,
552 0xc314, 0xffffffff, 0x00000800,
553 0xc30c, 0xffffffff, 0x800000f4
554};
555
556static const u32 oland_golden_registers[] =
557{
558 0x9a10, 0x00010000, 0x00018208,
559 0x9830, 0xffffffff, 0x00000000,
560 0x9834, 0xf00fffff, 0x00000400,
561 0x9838, 0x0002021c, 0x00020200,
562 0xc78, 0x00000080, 0x00000000,
563 0xd030, 0x000300c0, 0x00800040,
564 0xd830, 0x000300c0, 0x00800040,
565 0x5bb0, 0x000000f0, 0x00000070,
566 0x5bc0, 0x00200000, 0x50100000,
567 0x7030, 0x31000311, 0x00000011,
568 0x2ae4, 0x00073ffe, 0x000022a2,
569 0x240c, 0x000007ff, 0x00000000,
570 0x8a14, 0xf000001f, 0x00000007,
571 0x8b24, 0xffffffff, 0x00ffffff,
572 0x8b10, 0x0000ff0f, 0x00000000,
573 0x28a4c, 0x07ffffff, 0x4e000000,
574 0x28350, 0x3f3f3fff, 0x00000082,
575 0x30, 0x000000ff, 0x0040,
576 0x34, 0x00000040, 0x00004040,
577 0x9100, 0x07ffffff, 0x03000000,
578 0x9060, 0x0000007f, 0x00000020,
579 0x9508, 0x00010000, 0x00010000,
580 0xac14, 0x000003ff, 0x000000f3,
581 0xac10, 0xffffffff, 0x00000000,
582 0xac0c, 0xffffffff, 0x00003210,
583 0x88d4, 0x0000001f, 0x00000010,
584 0x15c0, 0x000c0fc0, 0x000c0400
585};
586
587static const u32 hainan_golden_registers[] =
588{
589 0x9a10, 0x00010000, 0x00018208,
590 0x9830, 0xffffffff, 0x00000000,
591 0x9834, 0xf00fffff, 0x00000400,
592 0x9838, 0x0002021c, 0x00020200,
593 0xd0c0, 0xff000fff, 0x00000100,
594 0xd030, 0x000300c0, 0x00800040,
595 0xd8c0, 0xff000fff, 0x00000100,
596 0xd830, 0x000300c0, 0x00800040,
597 0x2ae4, 0x00073ffe, 0x000022a2,
598 0x240c, 0x000007ff, 0x00000000,
599 0x8a14, 0xf000001f, 0x00000007,
600 0x8b24, 0xffffffff, 0x00ffffff,
601 0x8b10, 0x0000ff0f, 0x00000000,
602 0x28a4c, 0x07ffffff, 0x4e000000,
603 0x28350, 0x3f3f3fff, 0x00000000,
604 0x30, 0x000000ff, 0x0040,
605 0x34, 0x00000040, 0x00004040,
606 0x9100, 0x03e00000, 0x03600000,
607 0x9060, 0x0000007f, 0x00000020,
608 0x9508, 0x00010000, 0x00010000,
609 0xac14, 0x000003ff, 0x000000f1,
610 0xac10, 0xffffffff, 0x00000000,
611 0xac0c, 0xffffffff, 0x00003210,
612 0x88d4, 0x0000001f, 0x00000010,
613 0x15c0, 0x000c0fc0, 0x000c0400
614};
615
616static const u32 hainan_golden_registers2[] =
617{
618 0x98f8, 0xffffffff, 0x02010001
619};
620
621static const u32 tahiti_mgcg_cgcg_init[] =
622{
623 0xc400, 0xffffffff, 0xfffffffc,
624 0x802c, 0xffffffff, 0xe0000000,
625 0x9a60, 0xffffffff, 0x00000100,
626 0x92a4, 0xffffffff, 0x00000100,
627 0xc164, 0xffffffff, 0x00000100,
628 0x9774, 0xffffffff, 0x00000100,
629 0x8984, 0xffffffff, 0x06000100,
630 0x8a18, 0xffffffff, 0x00000100,
631 0x92a0, 0xffffffff, 0x00000100,
632 0xc380, 0xffffffff, 0x00000100,
633 0x8b28, 0xffffffff, 0x00000100,
634 0x9144, 0xffffffff, 0x00000100,
635 0x8d88, 0xffffffff, 0x00000100,
636 0x8d8c, 0xffffffff, 0x00000100,
637 0x9030, 0xffffffff, 0x00000100,
638 0x9034, 0xffffffff, 0x00000100,
639 0x9038, 0xffffffff, 0x00000100,
640 0x903c, 0xffffffff, 0x00000100,
641 0xad80, 0xffffffff, 0x00000100,
642 0xac54, 0xffffffff, 0x00000100,
643 0x897c, 0xffffffff, 0x06000100,
644 0x9868, 0xffffffff, 0x00000100,
645 0x9510, 0xffffffff, 0x00000100,
646 0xaf04, 0xffffffff, 0x00000100,
647 0xae04, 0xffffffff, 0x00000100,
648 0x949c, 0xffffffff, 0x00000100,
649 0x802c, 0xffffffff, 0xe0000000,
650 0x9160, 0xffffffff, 0x00010000,
651 0x9164, 0xffffffff, 0x00030002,
652 0x9168, 0xffffffff, 0x00040007,
653 0x916c, 0xffffffff, 0x00060005,
654 0x9170, 0xffffffff, 0x00090008,
655 0x9174, 0xffffffff, 0x00020001,
656 0x9178, 0xffffffff, 0x00040003,
657 0x917c, 0xffffffff, 0x00000007,
658 0x9180, 0xffffffff, 0x00060005,
659 0x9184, 0xffffffff, 0x00090008,
660 0x9188, 0xffffffff, 0x00030002,
661 0x918c, 0xffffffff, 0x00050004,
662 0x9190, 0xffffffff, 0x00000008,
663 0x9194, 0xffffffff, 0x00070006,
664 0x9198, 0xffffffff, 0x000a0009,
665 0x919c, 0xffffffff, 0x00040003,
666 0x91a0, 0xffffffff, 0x00060005,
667 0x91a4, 0xffffffff, 0x00000009,
668 0x91a8, 0xffffffff, 0x00080007,
669 0x91ac, 0xffffffff, 0x000b000a,
670 0x91b0, 0xffffffff, 0x00050004,
671 0x91b4, 0xffffffff, 0x00070006,
672 0x91b8, 0xffffffff, 0x0008000b,
673 0x91bc, 0xffffffff, 0x000a0009,
674 0x91c0, 0xffffffff, 0x000d000c,
675 0x91c4, 0xffffffff, 0x00060005,
676 0x91c8, 0xffffffff, 0x00080007,
677 0x91cc, 0xffffffff, 0x0000000b,
678 0x91d0, 0xffffffff, 0x000a0009,
679 0x91d4, 0xffffffff, 0x000d000c,
680 0x91d8, 0xffffffff, 0x00070006,
681 0x91dc, 0xffffffff, 0x00090008,
682 0x91e0, 0xffffffff, 0x0000000c,
683 0x91e4, 0xffffffff, 0x000b000a,
684 0x91e8, 0xffffffff, 0x000e000d,
685 0x91ec, 0xffffffff, 0x00080007,
686 0x91f0, 0xffffffff, 0x000a0009,
687 0x91f4, 0xffffffff, 0x0000000d,
688 0x91f8, 0xffffffff, 0x000c000b,
689 0x91fc, 0xffffffff, 0x000f000e,
690 0x9200, 0xffffffff, 0x00090008,
691 0x9204, 0xffffffff, 0x000b000a,
692 0x9208, 0xffffffff, 0x000c000f,
693 0x920c, 0xffffffff, 0x000e000d,
694 0x9210, 0xffffffff, 0x00110010,
695 0x9214, 0xffffffff, 0x000a0009,
696 0x9218, 0xffffffff, 0x000c000b,
697 0x921c, 0xffffffff, 0x0000000f,
698 0x9220, 0xffffffff, 0x000e000d,
699 0x9224, 0xffffffff, 0x00110010,
700 0x9228, 0xffffffff, 0x000b000a,
701 0x922c, 0xffffffff, 0x000d000c,
702 0x9230, 0xffffffff, 0x00000010,
703 0x9234, 0xffffffff, 0x000f000e,
704 0x9238, 0xffffffff, 0x00120011,
705 0x923c, 0xffffffff, 0x000c000b,
706 0x9240, 0xffffffff, 0x000e000d,
707 0x9244, 0xffffffff, 0x00000011,
708 0x9248, 0xffffffff, 0x0010000f,
709 0x924c, 0xffffffff, 0x00130012,
710 0x9250, 0xffffffff, 0x000d000c,
711 0x9254, 0xffffffff, 0x000f000e,
712 0x9258, 0xffffffff, 0x00100013,
713 0x925c, 0xffffffff, 0x00120011,
714 0x9260, 0xffffffff, 0x00150014,
715 0x9264, 0xffffffff, 0x000e000d,
716 0x9268, 0xffffffff, 0x0010000f,
717 0x926c, 0xffffffff, 0x00000013,
718 0x9270, 0xffffffff, 0x00120011,
719 0x9274, 0xffffffff, 0x00150014,
720 0x9278, 0xffffffff, 0x000f000e,
721 0x927c, 0xffffffff, 0x00110010,
722 0x9280, 0xffffffff, 0x00000014,
723 0x9284, 0xffffffff, 0x00130012,
724 0x9288, 0xffffffff, 0x00160015,
725 0x928c, 0xffffffff, 0x0010000f,
726 0x9290, 0xffffffff, 0x00120011,
727 0x9294, 0xffffffff, 0x00000015,
728 0x9298, 0xffffffff, 0x00140013,
729 0x929c, 0xffffffff, 0x00170016,
730 0x9150, 0xffffffff, 0x96940200,
731 0x8708, 0xffffffff, 0x00900100,
732 0xc478, 0xffffffff, 0x00000080,
733 0xc404, 0xffffffff, 0x0020003f,
734 0x30, 0xffffffff, 0x0000001c,
735 0x34, 0x000f0000, 0x000f0000,
736 0x160c, 0xffffffff, 0x00000100,
737 0x1024, 0xffffffff, 0x00000100,
738 0x102c, 0x00000101, 0x00000000,
739 0x20a8, 0xffffffff, 0x00000104,
740 0x264c, 0x000c0000, 0x000c0000,
741 0x2648, 0x000c0000, 0x000c0000,
742 0x55e4, 0xff000fff, 0x00000100,
743 0x55e8, 0x00000001, 0x00000001,
744 0x2f50, 0x00000001, 0x00000001,
745 0x30cc, 0xc0000fff, 0x00000104,
746 0xc1e4, 0x00000001, 0x00000001,
747 0xd0c0, 0xfffffff0, 0x00000100,
748 0xd8c0, 0xfffffff0, 0x00000100
749};
750
751static const u32 pitcairn_mgcg_cgcg_init[] =
752{
753 0xc400, 0xffffffff, 0xfffffffc,
754 0x802c, 0xffffffff, 0xe0000000,
755 0x9a60, 0xffffffff, 0x00000100,
756 0x92a4, 0xffffffff, 0x00000100,
757 0xc164, 0xffffffff, 0x00000100,
758 0x9774, 0xffffffff, 0x00000100,
759 0x8984, 0xffffffff, 0x06000100,
760 0x8a18, 0xffffffff, 0x00000100,
761 0x92a0, 0xffffffff, 0x00000100,
762 0xc380, 0xffffffff, 0x00000100,
763 0x8b28, 0xffffffff, 0x00000100,
764 0x9144, 0xffffffff, 0x00000100,
765 0x8d88, 0xffffffff, 0x00000100,
766 0x8d8c, 0xffffffff, 0x00000100,
767 0x9030, 0xffffffff, 0x00000100,
768 0x9034, 0xffffffff, 0x00000100,
769 0x9038, 0xffffffff, 0x00000100,
770 0x903c, 0xffffffff, 0x00000100,
771 0xad80, 0xffffffff, 0x00000100,
772 0xac54, 0xffffffff, 0x00000100,
773 0x897c, 0xffffffff, 0x06000100,
774 0x9868, 0xffffffff, 0x00000100,
775 0x9510, 0xffffffff, 0x00000100,
776 0xaf04, 0xffffffff, 0x00000100,
777 0xae04, 0xffffffff, 0x00000100,
778 0x949c, 0xffffffff, 0x00000100,
779 0x802c, 0xffffffff, 0xe0000000,
780 0x9160, 0xffffffff, 0x00010000,
781 0x9164, 0xffffffff, 0x00030002,
782 0x9168, 0xffffffff, 0x00040007,
783 0x916c, 0xffffffff, 0x00060005,
784 0x9170, 0xffffffff, 0x00090008,
785 0x9174, 0xffffffff, 0x00020001,
786 0x9178, 0xffffffff, 0x00040003,
787 0x917c, 0xffffffff, 0x00000007,
788 0x9180, 0xffffffff, 0x00060005,
789 0x9184, 0xffffffff, 0x00090008,
790 0x9188, 0xffffffff, 0x00030002,
791 0x918c, 0xffffffff, 0x00050004,
792 0x9190, 0xffffffff, 0x00000008,
793 0x9194, 0xffffffff, 0x00070006,
794 0x9198, 0xffffffff, 0x000a0009,
795 0x919c, 0xffffffff, 0x00040003,
796 0x91a0, 0xffffffff, 0x00060005,
797 0x91a4, 0xffffffff, 0x00000009,
798 0x91a8, 0xffffffff, 0x00080007,
799 0x91ac, 0xffffffff, 0x000b000a,
800 0x91b0, 0xffffffff, 0x00050004,
801 0x91b4, 0xffffffff, 0x00070006,
802 0x91b8, 0xffffffff, 0x0008000b,
803 0x91bc, 0xffffffff, 0x000a0009,
804 0x91c0, 0xffffffff, 0x000d000c,
805 0x9200, 0xffffffff, 0x00090008,
806 0x9204, 0xffffffff, 0x000b000a,
807 0x9208, 0xffffffff, 0x000c000f,
808 0x920c, 0xffffffff, 0x000e000d,
809 0x9210, 0xffffffff, 0x00110010,
810 0x9214, 0xffffffff, 0x000a0009,
811 0x9218, 0xffffffff, 0x000c000b,
812 0x921c, 0xffffffff, 0x0000000f,
813 0x9220, 0xffffffff, 0x000e000d,
814 0x9224, 0xffffffff, 0x00110010,
815 0x9228, 0xffffffff, 0x000b000a,
816 0x922c, 0xffffffff, 0x000d000c,
817 0x9230, 0xffffffff, 0x00000010,
818 0x9234, 0xffffffff, 0x000f000e,
819 0x9238, 0xffffffff, 0x00120011,
820 0x923c, 0xffffffff, 0x000c000b,
821 0x9240, 0xffffffff, 0x000e000d,
822 0x9244, 0xffffffff, 0x00000011,
823 0x9248, 0xffffffff, 0x0010000f,
824 0x924c, 0xffffffff, 0x00130012,
825 0x9250, 0xffffffff, 0x000d000c,
826 0x9254, 0xffffffff, 0x000f000e,
827 0x9258, 0xffffffff, 0x00100013,
828 0x925c, 0xffffffff, 0x00120011,
829 0x9260, 0xffffffff, 0x00150014,
830 0x9150, 0xffffffff, 0x96940200,
831 0x8708, 0xffffffff, 0x00900100,
832 0xc478, 0xffffffff, 0x00000080,
833 0xc404, 0xffffffff, 0x0020003f,
834 0x30, 0xffffffff, 0x0000001c,
835 0x34, 0x000f0000, 0x000f0000,
836 0x160c, 0xffffffff, 0x00000100,
837 0x1024, 0xffffffff, 0x00000100,
838 0x102c, 0x00000101, 0x00000000,
839 0x20a8, 0xffffffff, 0x00000104,
840 0x55e4, 0xff000fff, 0x00000100,
841 0x55e8, 0x00000001, 0x00000001,
842 0x2f50, 0x00000001, 0x00000001,
843 0x30cc, 0xc0000fff, 0x00000104,
844 0xc1e4, 0x00000001, 0x00000001,
845 0xd0c0, 0xfffffff0, 0x00000100,
846 0xd8c0, 0xfffffff0, 0x00000100
847};
848
849static const u32 verde_mgcg_cgcg_init[] =
850{
851 0xc400, 0xffffffff, 0xfffffffc,
852 0x802c, 0xffffffff, 0xe0000000,
853 0x9a60, 0xffffffff, 0x00000100,
854 0x92a4, 0xffffffff, 0x00000100,
855 0xc164, 0xffffffff, 0x00000100,
856 0x9774, 0xffffffff, 0x00000100,
857 0x8984, 0xffffffff, 0x06000100,
858 0x8a18, 0xffffffff, 0x00000100,
859 0x92a0, 0xffffffff, 0x00000100,
860 0xc380, 0xffffffff, 0x00000100,
861 0x8b28, 0xffffffff, 0x00000100,
862 0x9144, 0xffffffff, 0x00000100,
863 0x8d88, 0xffffffff, 0x00000100,
864 0x8d8c, 0xffffffff, 0x00000100,
865 0x9030, 0xffffffff, 0x00000100,
866 0x9034, 0xffffffff, 0x00000100,
867 0x9038, 0xffffffff, 0x00000100,
868 0x903c, 0xffffffff, 0x00000100,
869 0xad80, 0xffffffff, 0x00000100,
870 0xac54, 0xffffffff, 0x00000100,
871 0x897c, 0xffffffff, 0x06000100,
872 0x9868, 0xffffffff, 0x00000100,
873 0x9510, 0xffffffff, 0x00000100,
874 0xaf04, 0xffffffff, 0x00000100,
875 0xae04, 0xffffffff, 0x00000100,
876 0x949c, 0xffffffff, 0x00000100,
877 0x802c, 0xffffffff, 0xe0000000,
878 0x9160, 0xffffffff, 0x00010000,
879 0x9164, 0xffffffff, 0x00030002,
880 0x9168, 0xffffffff, 0x00040007,
881 0x916c, 0xffffffff, 0x00060005,
882 0x9170, 0xffffffff, 0x00090008,
883 0x9174, 0xffffffff, 0x00020001,
884 0x9178, 0xffffffff, 0x00040003,
885 0x917c, 0xffffffff, 0x00000007,
886 0x9180, 0xffffffff, 0x00060005,
887 0x9184, 0xffffffff, 0x00090008,
888 0x9188, 0xffffffff, 0x00030002,
889 0x918c, 0xffffffff, 0x00050004,
890 0x9190, 0xffffffff, 0x00000008,
891 0x9194, 0xffffffff, 0x00070006,
892 0x9198, 0xffffffff, 0x000a0009,
893 0x919c, 0xffffffff, 0x00040003,
894 0x91a0, 0xffffffff, 0x00060005,
895 0x91a4, 0xffffffff, 0x00000009,
896 0x91a8, 0xffffffff, 0x00080007,
897 0x91ac, 0xffffffff, 0x000b000a,
898 0x91b0, 0xffffffff, 0x00050004,
899 0x91b4, 0xffffffff, 0x00070006,
900 0x91b8, 0xffffffff, 0x0008000b,
901 0x91bc, 0xffffffff, 0x000a0009,
902 0x91c0, 0xffffffff, 0x000d000c,
903 0x9200, 0xffffffff, 0x00090008,
904 0x9204, 0xffffffff, 0x000b000a,
905 0x9208, 0xffffffff, 0x000c000f,
906 0x920c, 0xffffffff, 0x000e000d,
907 0x9210, 0xffffffff, 0x00110010,
908 0x9214, 0xffffffff, 0x000a0009,
909 0x9218, 0xffffffff, 0x000c000b,
910 0x921c, 0xffffffff, 0x0000000f,
911 0x9220, 0xffffffff, 0x000e000d,
912 0x9224, 0xffffffff, 0x00110010,
913 0x9228, 0xffffffff, 0x000b000a,
914 0x922c, 0xffffffff, 0x000d000c,
915 0x9230, 0xffffffff, 0x00000010,
916 0x9234, 0xffffffff, 0x000f000e,
917 0x9238, 0xffffffff, 0x00120011,
918 0x923c, 0xffffffff, 0x000c000b,
919 0x9240, 0xffffffff, 0x000e000d,
920 0x9244, 0xffffffff, 0x00000011,
921 0x9248, 0xffffffff, 0x0010000f,
922 0x924c, 0xffffffff, 0x00130012,
923 0x9250, 0xffffffff, 0x000d000c,
924 0x9254, 0xffffffff, 0x000f000e,
925 0x9258, 0xffffffff, 0x00100013,
926 0x925c, 0xffffffff, 0x00120011,
927 0x9260, 0xffffffff, 0x00150014,
928 0x9150, 0xffffffff, 0x96940200,
929 0x8708, 0xffffffff, 0x00900100,
930 0xc478, 0xffffffff, 0x00000080,
931 0xc404, 0xffffffff, 0x0020003f,
932 0x30, 0xffffffff, 0x0000001c,
933 0x34, 0x000f0000, 0x000f0000,
934 0x160c, 0xffffffff, 0x00000100,
935 0x1024, 0xffffffff, 0x00000100,
936 0x102c, 0x00000101, 0x00000000,
937 0x20a8, 0xffffffff, 0x00000104,
938 0x264c, 0x000c0000, 0x000c0000,
939 0x2648, 0x000c0000, 0x000c0000,
940 0x55e4, 0xff000fff, 0x00000100,
941 0x55e8, 0x00000001, 0x00000001,
942 0x2f50, 0x00000001, 0x00000001,
943 0x30cc, 0xc0000fff, 0x00000104,
944 0xc1e4, 0x00000001, 0x00000001,
945 0xd0c0, 0xfffffff0, 0x00000100,
946 0xd8c0, 0xfffffff0, 0x00000100
947};
948
949static const u32 oland_mgcg_cgcg_init[] =
950{
951 0xc400, 0xffffffff, 0xfffffffc,
952 0x802c, 0xffffffff, 0xe0000000,
953 0x9a60, 0xffffffff, 0x00000100,
954 0x92a4, 0xffffffff, 0x00000100,
955 0xc164, 0xffffffff, 0x00000100,
956 0x9774, 0xffffffff, 0x00000100,
957 0x8984, 0xffffffff, 0x06000100,
958 0x8a18, 0xffffffff, 0x00000100,
959 0x92a0, 0xffffffff, 0x00000100,
960 0xc380, 0xffffffff, 0x00000100,
961 0x8b28, 0xffffffff, 0x00000100,
962 0x9144, 0xffffffff, 0x00000100,
963 0x8d88, 0xffffffff, 0x00000100,
964 0x8d8c, 0xffffffff, 0x00000100,
965 0x9030, 0xffffffff, 0x00000100,
966 0x9034, 0xffffffff, 0x00000100,
967 0x9038, 0xffffffff, 0x00000100,
968 0x903c, 0xffffffff, 0x00000100,
969 0xad80, 0xffffffff, 0x00000100,
970 0xac54, 0xffffffff, 0x00000100,
971 0x897c, 0xffffffff, 0x06000100,
972 0x9868, 0xffffffff, 0x00000100,
973 0x9510, 0xffffffff, 0x00000100,
974 0xaf04, 0xffffffff, 0x00000100,
975 0xae04, 0xffffffff, 0x00000100,
976 0x949c, 0xffffffff, 0x00000100,
977 0x802c, 0xffffffff, 0xe0000000,
978 0x9160, 0xffffffff, 0x00010000,
979 0x9164, 0xffffffff, 0x00030002,
980 0x9168, 0xffffffff, 0x00040007,
981 0x916c, 0xffffffff, 0x00060005,
982 0x9170, 0xffffffff, 0x00090008,
983 0x9174, 0xffffffff, 0x00020001,
984 0x9178, 0xffffffff, 0x00040003,
985 0x917c, 0xffffffff, 0x00000007,
986 0x9180, 0xffffffff, 0x00060005,
987 0x9184, 0xffffffff, 0x00090008,
988 0x9188, 0xffffffff, 0x00030002,
989 0x918c, 0xffffffff, 0x00050004,
990 0x9190, 0xffffffff, 0x00000008,
991 0x9194, 0xffffffff, 0x00070006,
992 0x9198, 0xffffffff, 0x000a0009,
993 0x919c, 0xffffffff, 0x00040003,
994 0x91a0, 0xffffffff, 0x00060005,
995 0x91a4, 0xffffffff, 0x00000009,
996 0x91a8, 0xffffffff, 0x00080007,
997 0x91ac, 0xffffffff, 0x000b000a,
998 0x91b0, 0xffffffff, 0x00050004,
999 0x91b4, 0xffffffff, 0x00070006,
1000 0x91b8, 0xffffffff, 0x0008000b,
1001 0x91bc, 0xffffffff, 0x000a0009,
1002 0x91c0, 0xffffffff, 0x000d000c,
1003 0x91c4, 0xffffffff, 0x00060005,
1004 0x91c8, 0xffffffff, 0x00080007,
1005 0x91cc, 0xffffffff, 0x0000000b,
1006 0x91d0, 0xffffffff, 0x000a0009,
1007 0x91d4, 0xffffffff, 0x000d000c,
1008 0x9150, 0xffffffff, 0x96940200,
1009 0x8708, 0xffffffff, 0x00900100,
1010 0xc478, 0xffffffff, 0x00000080,
1011 0xc404, 0xffffffff, 0x0020003f,
1012 0x30, 0xffffffff, 0x0000001c,
1013 0x34, 0x000f0000, 0x000f0000,
1014 0x160c, 0xffffffff, 0x00000100,
1015 0x1024, 0xffffffff, 0x00000100,
1016 0x102c, 0x00000101, 0x00000000,
1017 0x20a8, 0xffffffff, 0x00000104,
1018 0x264c, 0x000c0000, 0x000c0000,
1019 0x2648, 0x000c0000, 0x000c0000,
1020 0x55e4, 0xff000fff, 0x00000100,
1021 0x55e8, 0x00000001, 0x00000001,
1022 0x2f50, 0x00000001, 0x00000001,
1023 0x30cc, 0xc0000fff, 0x00000104,
1024 0xc1e4, 0x00000001, 0x00000001,
1025 0xd0c0, 0xfffffff0, 0x00000100,
1026 0xd8c0, 0xfffffff0, 0x00000100
1027};
1028
1029static const u32 hainan_mgcg_cgcg_init[] =
1030{
1031 0xc400, 0xffffffff, 0xfffffffc,
1032 0x802c, 0xffffffff, 0xe0000000,
1033 0x9a60, 0xffffffff, 0x00000100,
1034 0x92a4, 0xffffffff, 0x00000100,
1035 0xc164, 0xffffffff, 0x00000100,
1036 0x9774, 0xffffffff, 0x00000100,
1037 0x8984, 0xffffffff, 0x06000100,
1038 0x8a18, 0xffffffff, 0x00000100,
1039 0x92a0, 0xffffffff, 0x00000100,
1040 0xc380, 0xffffffff, 0x00000100,
1041 0x8b28, 0xffffffff, 0x00000100,
1042 0x9144, 0xffffffff, 0x00000100,
1043 0x8d88, 0xffffffff, 0x00000100,
1044 0x8d8c, 0xffffffff, 0x00000100,
1045 0x9030, 0xffffffff, 0x00000100,
1046 0x9034, 0xffffffff, 0x00000100,
1047 0x9038, 0xffffffff, 0x00000100,
1048 0x903c, 0xffffffff, 0x00000100,
1049 0xad80, 0xffffffff, 0x00000100,
1050 0xac54, 0xffffffff, 0x00000100,
1051 0x897c, 0xffffffff, 0x06000100,
1052 0x9868, 0xffffffff, 0x00000100,
1053 0x9510, 0xffffffff, 0x00000100,
1054 0xaf04, 0xffffffff, 0x00000100,
1055 0xae04, 0xffffffff, 0x00000100,
1056 0x949c, 0xffffffff, 0x00000100,
1057 0x802c, 0xffffffff, 0xe0000000,
1058 0x9160, 0xffffffff, 0x00010000,
1059 0x9164, 0xffffffff, 0x00030002,
1060 0x9168, 0xffffffff, 0x00040007,
1061 0x916c, 0xffffffff, 0x00060005,
1062 0x9170, 0xffffffff, 0x00090008,
1063 0x9174, 0xffffffff, 0x00020001,
1064 0x9178, 0xffffffff, 0x00040003,
1065 0x917c, 0xffffffff, 0x00000007,
1066 0x9180, 0xffffffff, 0x00060005,
1067 0x9184, 0xffffffff, 0x00090008,
1068 0x9188, 0xffffffff, 0x00030002,
1069 0x918c, 0xffffffff, 0x00050004,
1070 0x9190, 0xffffffff, 0x00000008,
1071 0x9194, 0xffffffff, 0x00070006,
1072 0x9198, 0xffffffff, 0x000a0009,
1073 0x919c, 0xffffffff, 0x00040003,
1074 0x91a0, 0xffffffff, 0x00060005,
1075 0x91a4, 0xffffffff, 0x00000009,
1076 0x91a8, 0xffffffff, 0x00080007,
1077 0x91ac, 0xffffffff, 0x000b000a,
1078 0x91b0, 0xffffffff, 0x00050004,
1079 0x91b4, 0xffffffff, 0x00070006,
1080 0x91b8, 0xffffffff, 0x0008000b,
1081 0x91bc, 0xffffffff, 0x000a0009,
1082 0x91c0, 0xffffffff, 0x000d000c,
1083 0x91c4, 0xffffffff, 0x00060005,
1084 0x91c8, 0xffffffff, 0x00080007,
1085 0x91cc, 0xffffffff, 0x0000000b,
1086 0x91d0, 0xffffffff, 0x000a0009,
1087 0x91d4, 0xffffffff, 0x000d000c,
1088 0x9150, 0xffffffff, 0x96940200,
1089 0x8708, 0xffffffff, 0x00900100,
1090 0xc478, 0xffffffff, 0x00000080,
1091 0xc404, 0xffffffff, 0x0020003f,
1092 0x30, 0xffffffff, 0x0000001c,
1093 0x34, 0x000f0000, 0x000f0000,
1094 0x160c, 0xffffffff, 0x00000100,
1095 0x1024, 0xffffffff, 0x00000100,
1096 0x20a8, 0xffffffff, 0x00000104,
1097 0x264c, 0x000c0000, 0x000c0000,
1098 0x2648, 0x000c0000, 0x000c0000,
1099 0x2f50, 0x00000001, 0x00000001,
1100 0x30cc, 0xc0000fff, 0x00000104,
1101 0xc1e4, 0x00000001, 0x00000001,
1102 0xd0c0, 0xfffffff0, 0x00000100,
1103 0xd8c0, 0xfffffff0, 0x00000100
1104};
1105
1106static u32 verde_pg_init[] =
1107{
1108 0x353c, 0xffffffff, 0x40000,
1109 0x3538, 0xffffffff, 0x200010ff,
1110 0x353c, 0xffffffff, 0x0,
1111 0x353c, 0xffffffff, 0x0,
1112 0x353c, 0xffffffff, 0x0,
1113 0x353c, 0xffffffff, 0x0,
1114 0x353c, 0xffffffff, 0x0,
1115 0x353c, 0xffffffff, 0x7007,
1116 0x3538, 0xffffffff, 0x300010ff,
1117 0x353c, 0xffffffff, 0x0,
1118 0x353c, 0xffffffff, 0x0,
1119 0x353c, 0xffffffff, 0x0,
1120 0x353c, 0xffffffff, 0x0,
1121 0x353c, 0xffffffff, 0x0,
1122 0x353c, 0xffffffff, 0x400000,
1123 0x3538, 0xffffffff, 0x100010ff,
1124 0x353c, 0xffffffff, 0x0,
1125 0x353c, 0xffffffff, 0x0,
1126 0x353c, 0xffffffff, 0x0,
1127 0x353c, 0xffffffff, 0x0,
1128 0x353c, 0xffffffff, 0x0,
1129 0x353c, 0xffffffff, 0x120200,
1130 0x3538, 0xffffffff, 0x500010ff,
1131 0x353c, 0xffffffff, 0x0,
1132 0x353c, 0xffffffff, 0x0,
1133 0x353c, 0xffffffff, 0x0,
1134 0x353c, 0xffffffff, 0x0,
1135 0x353c, 0xffffffff, 0x0,
1136 0x353c, 0xffffffff, 0x1e1e16,
1137 0x3538, 0xffffffff, 0x600010ff,
1138 0x353c, 0xffffffff, 0x0,
1139 0x353c, 0xffffffff, 0x0,
1140 0x353c, 0xffffffff, 0x0,
1141 0x353c, 0xffffffff, 0x0,
1142 0x353c, 0xffffffff, 0x0,
1143 0x353c, 0xffffffff, 0x171f1e,
1144 0x3538, 0xffffffff, 0x700010ff,
1145 0x353c, 0xffffffff, 0x0,
1146 0x353c, 0xffffffff, 0x0,
1147 0x353c, 0xffffffff, 0x0,
1148 0x353c, 0xffffffff, 0x0,
1149 0x353c, 0xffffffff, 0x0,
1150 0x353c, 0xffffffff, 0x0,
1151 0x3538, 0xffffffff, 0x9ff,
1152 0x3500, 0xffffffff, 0x0,
1153 0x3504, 0xffffffff, 0x10000800,
1154 0x3504, 0xffffffff, 0xf,
1155 0x3504, 0xffffffff, 0xf,
1156 0x3500, 0xffffffff, 0x4,
1157 0x3504, 0xffffffff, 0x1000051e,
1158 0x3504, 0xffffffff, 0xffff,
1159 0x3504, 0xffffffff, 0xffff,
1160 0x3500, 0xffffffff, 0x8,
1161 0x3504, 0xffffffff, 0x80500,
1162 0x3500, 0xffffffff, 0x12,
1163 0x3504, 0xffffffff, 0x9050c,
1164 0x3500, 0xffffffff, 0x1d,
1165 0x3504, 0xffffffff, 0xb052c,
1166 0x3500, 0xffffffff, 0x2a,
1167 0x3504, 0xffffffff, 0x1053e,
1168 0x3500, 0xffffffff, 0x2d,
1169 0x3504, 0xffffffff, 0x10546,
1170 0x3500, 0xffffffff, 0x30,
1171 0x3504, 0xffffffff, 0xa054e,
1172 0x3500, 0xffffffff, 0x3c,
1173 0x3504, 0xffffffff, 0x1055f,
1174 0x3500, 0xffffffff, 0x3f,
1175 0x3504, 0xffffffff, 0x10567,
1176 0x3500, 0xffffffff, 0x42,
1177 0x3504, 0xffffffff, 0x1056f,
1178 0x3500, 0xffffffff, 0x45,
1179 0x3504, 0xffffffff, 0x10572,
1180 0x3500, 0xffffffff, 0x48,
1181 0x3504, 0xffffffff, 0x20575,
1182 0x3500, 0xffffffff, 0x4c,
1183 0x3504, 0xffffffff, 0x190801,
1184 0x3500, 0xffffffff, 0x67,
1185 0x3504, 0xffffffff, 0x1082a,
1186 0x3500, 0xffffffff, 0x6a,
1187 0x3504, 0xffffffff, 0x1b082d,
1188 0x3500, 0xffffffff, 0x87,
1189 0x3504, 0xffffffff, 0x310851,
1190 0x3500, 0xffffffff, 0xba,
1191 0x3504, 0xffffffff, 0x891,
1192 0x3500, 0xffffffff, 0xbc,
1193 0x3504, 0xffffffff, 0x893,
1194 0x3500, 0xffffffff, 0xbe,
1195 0x3504, 0xffffffff, 0x20895,
1196 0x3500, 0xffffffff, 0xc2,
1197 0x3504, 0xffffffff, 0x20899,
1198 0x3500, 0xffffffff, 0xc6,
1199 0x3504, 0xffffffff, 0x2089d,
1200 0x3500, 0xffffffff, 0xca,
1201 0x3504, 0xffffffff, 0x8a1,
1202 0x3500, 0xffffffff, 0xcc,
1203 0x3504, 0xffffffff, 0x8a3,
1204 0x3500, 0xffffffff, 0xce,
1205 0x3504, 0xffffffff, 0x308a5,
1206 0x3500, 0xffffffff, 0xd3,
1207 0x3504, 0xffffffff, 0x6d08cd,
1208 0x3500, 0xffffffff, 0x142,
1209 0x3504, 0xffffffff, 0x2000095a,
1210 0x3504, 0xffffffff, 0x1,
1211 0x3500, 0xffffffff, 0x144,
1212 0x3504, 0xffffffff, 0x301f095b,
1213 0x3500, 0xffffffff, 0x165,
1214 0x3504, 0xffffffff, 0xc094d,
1215 0x3500, 0xffffffff, 0x173,
1216 0x3504, 0xffffffff, 0xf096d,
1217 0x3500, 0xffffffff, 0x184,
1218 0x3504, 0xffffffff, 0x15097f,
1219 0x3500, 0xffffffff, 0x19b,
1220 0x3504, 0xffffffff, 0xc0998,
1221 0x3500, 0xffffffff, 0x1a9,
1222 0x3504, 0xffffffff, 0x409a7,
1223 0x3500, 0xffffffff, 0x1af,
1224 0x3504, 0xffffffff, 0xcdc,
1225 0x3500, 0xffffffff, 0x1b1,
1226 0x3504, 0xffffffff, 0x800,
1227 0x3508, 0xffffffff, 0x6c9b2000,
1228 0x3510, 0xfc00, 0x2000,
1229 0x3544, 0xffffffff, 0xfc0,
1230 0x28d4, 0x00000100, 0x100
1231};
1232
1233static void si_init_golden_registers(struct radeon_device *rdev)
1234{
1235 switch (rdev->family) {
1236 case CHIP_TAHITI:
1237 radeon_program_register_sequence(rdev,
1238 tahiti_golden_registers,
1239 (const u32)ARRAY_SIZE(tahiti_golden_registers));
1240 radeon_program_register_sequence(rdev,
1241 tahiti_golden_rlc_registers,
1242 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
1243 radeon_program_register_sequence(rdev,
1244 tahiti_mgcg_cgcg_init,
1245 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
1246 radeon_program_register_sequence(rdev,
1247 tahiti_golden_registers2,
1248 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
1249 break;
1250 case CHIP_PITCAIRN:
1251 radeon_program_register_sequence(rdev,
1252 pitcairn_golden_registers,
1253 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
1254 radeon_program_register_sequence(rdev,
1255 pitcairn_golden_rlc_registers,
1256 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
1257 radeon_program_register_sequence(rdev,
1258 pitcairn_mgcg_cgcg_init,
1259 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
1260 break;
1261 case CHIP_VERDE:
1262 radeon_program_register_sequence(rdev,
1263 verde_golden_registers,
1264 (const u32)ARRAY_SIZE(verde_golden_registers));
1265 radeon_program_register_sequence(rdev,
1266 verde_golden_rlc_registers,
1267 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
1268 radeon_program_register_sequence(rdev,
1269 verde_mgcg_cgcg_init,
1270 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
1271 radeon_program_register_sequence(rdev,
1272 verde_pg_init,
1273 (const u32)ARRAY_SIZE(verde_pg_init));
1274 break;
1275 case CHIP_OLAND:
1276 radeon_program_register_sequence(rdev,
1277 oland_golden_registers,
1278 (const u32)ARRAY_SIZE(oland_golden_registers));
1279 radeon_program_register_sequence(rdev,
1280 oland_golden_rlc_registers,
1281 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
1282 radeon_program_register_sequence(rdev,
1283 oland_mgcg_cgcg_init,
1284 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
1285 break;
1286 case CHIP_HAINAN:
1287 radeon_program_register_sequence(rdev,
1288 hainan_golden_registers,
1289 (const u32)ARRAY_SIZE(hainan_golden_registers));
1290 radeon_program_register_sequence(rdev,
1291 hainan_golden_registers2,
1292 (const u32)ARRAY_SIZE(hainan_golden_registers2));
1293 radeon_program_register_sequence(rdev,
1294 hainan_mgcg_cgcg_init,
1295 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
1296 break;
1297 default:
1298 break;
1299 }
1300}
1301
1302/**
1303 * si_get_allowed_info_register - fetch the register for the info ioctl
1304 *
1305 * @rdev: radeon_device pointer
1306 * @reg: register offset in bytes
1307 * @val: register value
1308 *
1309 * Returns 0 for success or -EINVAL for an invalid register
1310 *
1311 */
1312int si_get_allowed_info_register(struct radeon_device *rdev,
1313 u32 reg, u32 *val)
1314{
1315 switch (reg) {
1316 case GRBM_STATUS:
1317 case GRBM_STATUS2:
1318 case GRBM_STATUS_SE0:
1319 case GRBM_STATUS_SE1:
1320 case SRBM_STATUS:
1321 case SRBM_STATUS2:
1322 case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
1323 case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
1324 case UVD_STATUS:
1325 *val = RREG32(reg);
1326 return 0;
1327 default:
1328 return -EINVAL;
1329 }
1330}
1331
1332#define PCIE_BUS_CLK 10000
1333#define TCLK (PCIE_BUS_CLK / 10)
1334
1335/**
1336 * si_get_xclk - get the xclk
1337 *
1338 * @rdev: radeon_device pointer
1339 *
1340 * Returns the reference clock used by the gfx engine
1341 * (SI).
1342 */
1343u32 si_get_xclk(struct radeon_device *rdev)
1344{
1345 u32 reference_clock = rdev->clock.spll.reference_freq;
1346 u32 tmp;
1347
1348 tmp = RREG32(CG_CLKPIN_CNTL_2);
1349 if (tmp & MUX_TCLK_TO_XCLK)
1350 return TCLK;
1351
1352 tmp = RREG32(CG_CLKPIN_CNTL);
1353 if (tmp & XTALIN_DIVIDE)
1354 return reference_clock / 4;
1355
1356 return reference_clock;
1357}
1358
1359/* get temperature in millidegrees */
1360int si_get_temp(struct radeon_device *rdev)
1361{
1362 u32 temp;
1363 int actual_temp = 0;
1364
1365 temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
1366 CTF_TEMP_SHIFT;
1367
1368 if (temp & 0x200)
1369 actual_temp = 255;
1370 else
1371 actual_temp = temp & 0x1ff;
1372
1373 actual_temp = (actual_temp * 1000);
1374
1375 return actual_temp;
1376}
1377
1378#define TAHITI_IO_MC_REGS_SIZE 36
1379
1380static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1381 {0x0000006f, 0x03044000},
1382 {0x00000070, 0x0480c018},
1383 {0x00000071, 0x00000040},
1384 {0x00000072, 0x01000000},
1385 {0x00000074, 0x000000ff},
1386 {0x00000075, 0x00143400},
1387 {0x00000076, 0x08ec0800},
1388 {0x00000077, 0x040000cc},
1389 {0x00000079, 0x00000000},
1390 {0x0000007a, 0x21000409},
1391 {0x0000007c, 0x00000000},
1392 {0x0000007d, 0xe8000000},
1393 {0x0000007e, 0x044408a8},
1394 {0x0000007f, 0x00000003},
1395 {0x00000080, 0x00000000},
1396 {0x00000081, 0x01000000},
1397 {0x00000082, 0x02000000},
1398 {0x00000083, 0x00000000},
1399 {0x00000084, 0xe3f3e4f4},
1400 {0x00000085, 0x00052024},
1401 {0x00000087, 0x00000000},
1402 {0x00000088, 0x66036603},
1403 {0x00000089, 0x01000000},
1404 {0x0000008b, 0x1c0a0000},
1405 {0x0000008c, 0xff010000},
1406 {0x0000008e, 0xffffefff},
1407 {0x0000008f, 0xfff3efff},
1408 {0x00000090, 0xfff3efbf},
1409 {0x00000094, 0x00101101},
1410 {0x00000095, 0x00000fff},
1411 {0x00000096, 0x00116fff},
1412 {0x00000097, 0x60010000},
1413 {0x00000098, 0x10010000},
1414 {0x00000099, 0x00006000},
1415 {0x0000009a, 0x00001000},
1416 {0x0000009f, 0x00a77400}
1417};
1418
1419static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1420 {0x0000006f, 0x03044000},
1421 {0x00000070, 0x0480c018},
1422 {0x00000071, 0x00000040},
1423 {0x00000072, 0x01000000},
1424 {0x00000074, 0x000000ff},
1425 {0x00000075, 0x00143400},
1426 {0x00000076, 0x08ec0800},
1427 {0x00000077, 0x040000cc},
1428 {0x00000079, 0x00000000},
1429 {0x0000007a, 0x21000409},
1430 {0x0000007c, 0x00000000},
1431 {0x0000007d, 0xe8000000},
1432 {0x0000007e, 0x044408a8},
1433 {0x0000007f, 0x00000003},
1434 {0x00000080, 0x00000000},
1435 {0x00000081, 0x01000000},
1436 {0x00000082, 0x02000000},
1437 {0x00000083, 0x00000000},
1438 {0x00000084, 0xe3f3e4f4},
1439 {0x00000085, 0x00052024},
1440 {0x00000087, 0x00000000},
1441 {0x00000088, 0x66036603},
1442 {0x00000089, 0x01000000},
1443 {0x0000008b, 0x1c0a0000},
1444 {0x0000008c, 0xff010000},
1445 {0x0000008e, 0xffffefff},
1446 {0x0000008f, 0xfff3efff},
1447 {0x00000090, 0xfff3efbf},
1448 {0x00000094, 0x00101101},
1449 {0x00000095, 0x00000fff},
1450 {0x00000096, 0x00116fff},
1451 {0x00000097, 0x60010000},
1452 {0x00000098, 0x10010000},
1453 {0x00000099, 0x00006000},
1454 {0x0000009a, 0x00001000},
1455 {0x0000009f, 0x00a47400}
1456};
1457
1458static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1459 {0x0000006f, 0x03044000},
1460 {0x00000070, 0x0480c018},
1461 {0x00000071, 0x00000040},
1462 {0x00000072, 0x01000000},
1463 {0x00000074, 0x000000ff},
1464 {0x00000075, 0x00143400},
1465 {0x00000076, 0x08ec0800},
1466 {0x00000077, 0x040000cc},
1467 {0x00000079, 0x00000000},
1468 {0x0000007a, 0x21000409},
1469 {0x0000007c, 0x00000000},
1470 {0x0000007d, 0xe8000000},
1471 {0x0000007e, 0x044408a8},
1472 {0x0000007f, 0x00000003},
1473 {0x00000080, 0x00000000},
1474 {0x00000081, 0x01000000},
1475 {0x00000082, 0x02000000},
1476 {0x00000083, 0x00000000},
1477 {0x00000084, 0xe3f3e4f4},
1478 {0x00000085, 0x00052024},
1479 {0x00000087, 0x00000000},
1480 {0x00000088, 0x66036603},
1481 {0x00000089, 0x01000000},
1482 {0x0000008b, 0x1c0a0000},
1483 {0x0000008c, 0xff010000},
1484 {0x0000008e, 0xffffefff},
1485 {0x0000008f, 0xfff3efff},
1486 {0x00000090, 0xfff3efbf},
1487 {0x00000094, 0x00101101},
1488 {0x00000095, 0x00000fff},
1489 {0x00000096, 0x00116fff},
1490 {0x00000097, 0x60010000},
1491 {0x00000098, 0x10010000},
1492 {0x00000099, 0x00006000},
1493 {0x0000009a, 0x00001000},
1494 {0x0000009f, 0x00a37400}
1495};
1496
1497static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1498 {0x0000006f, 0x03044000},
1499 {0x00000070, 0x0480c018},
1500 {0x00000071, 0x00000040},
1501 {0x00000072, 0x01000000},
1502 {0x00000074, 0x000000ff},
1503 {0x00000075, 0x00143400},
1504 {0x00000076, 0x08ec0800},
1505 {0x00000077, 0x040000cc},
1506 {0x00000079, 0x00000000},
1507 {0x0000007a, 0x21000409},
1508 {0x0000007c, 0x00000000},
1509 {0x0000007d, 0xe8000000},
1510 {0x0000007e, 0x044408a8},
1511 {0x0000007f, 0x00000003},
1512 {0x00000080, 0x00000000},
1513 {0x00000081, 0x01000000},
1514 {0x00000082, 0x02000000},
1515 {0x00000083, 0x00000000},
1516 {0x00000084, 0xe3f3e4f4},
1517 {0x00000085, 0x00052024},
1518 {0x00000087, 0x00000000},
1519 {0x00000088, 0x66036603},
1520 {0x00000089, 0x01000000},
1521 {0x0000008b, 0x1c0a0000},
1522 {0x0000008c, 0xff010000},
1523 {0x0000008e, 0xffffefff},
1524 {0x0000008f, 0xfff3efff},
1525 {0x00000090, 0xfff3efbf},
1526 {0x00000094, 0x00101101},
1527 {0x00000095, 0x00000fff},
1528 {0x00000096, 0x00116fff},
1529 {0x00000097, 0x60010000},
1530 {0x00000098, 0x10010000},
1531 {0x00000099, 0x00006000},
1532 {0x0000009a, 0x00001000},
1533 {0x0000009f, 0x00a17730}
1534};
1535
1536static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1537 {0x0000006f, 0x03044000},
1538 {0x00000070, 0x0480c018},
1539 {0x00000071, 0x00000040},
1540 {0x00000072, 0x01000000},
1541 {0x00000074, 0x000000ff},
1542 {0x00000075, 0x00143400},
1543 {0x00000076, 0x08ec0800},
1544 {0x00000077, 0x040000cc},
1545 {0x00000079, 0x00000000},
1546 {0x0000007a, 0x21000409},
1547 {0x0000007c, 0x00000000},
1548 {0x0000007d, 0xe8000000},
1549 {0x0000007e, 0x044408a8},
1550 {0x0000007f, 0x00000003},
1551 {0x00000080, 0x00000000},
1552 {0x00000081, 0x01000000},
1553 {0x00000082, 0x02000000},
1554 {0x00000083, 0x00000000},
1555 {0x00000084, 0xe3f3e4f4},
1556 {0x00000085, 0x00052024},
1557 {0x00000087, 0x00000000},
1558 {0x00000088, 0x66036603},
1559 {0x00000089, 0x01000000},
1560 {0x0000008b, 0x1c0a0000},
1561 {0x0000008c, 0xff010000},
1562 {0x0000008e, 0xffffefff},
1563 {0x0000008f, 0xfff3efff},
1564 {0x00000090, 0xfff3efbf},
1565 {0x00000094, 0x00101101},
1566 {0x00000095, 0x00000fff},
1567 {0x00000096, 0x00116fff},
1568 {0x00000097, 0x60010000},
1569 {0x00000098, 0x10010000},
1570 {0x00000099, 0x00006000},
1571 {0x0000009a, 0x00001000},
1572 {0x0000009f, 0x00a07730}
1573};
1574
1575/* ucode loading */
1576int si_mc_load_microcode(struct radeon_device *rdev)
1577{
1578 const __be32 *fw_data = NULL((void *)0);
1579 const __le32 *new_fw_data = NULL((void *)0);
1580 u32 running;
1581 u32 *io_mc_regs = NULL((void *)0);
1582 const __le32 *new_io_mc_regs = NULL((void *)0);
1583 int i, regs_size, ucode_size;
1584
1585 if (!rdev->mc_fw)
1586 return -EINVAL22;
1587
1588 if (rdev->new_fw) {
1589 const struct mc_firmware_header_v1_0 *hdr =
1590 (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
1591
1592 radeon_ucode_print_mc_hdr(&hdr->header);
1593 regs_size = le32_to_cpu(hdr->io_debug_size_bytes)((__uint32_t)(hdr->io_debug_size_bytes)) / (4 * 2);
1594 new_io_mc_regs = (const __le32 *)
1595 (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)((__uint32_t)(hdr->io_debug_array_offset_bytes)));
1596 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes)((__uint32_t)(hdr->header.ucode_size_bytes)) / 4;
1597 new_fw_data = (const __le32 *)
1598 (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)((__uint32_t)(hdr->header.ucode_array_offset_bytes)));
1599 } else {
1600 ucode_size = rdev->mc_fw->size / 4;
1601
1602 switch (rdev->family) {
1603 case CHIP_TAHITI:
1604 io_mc_regs = (u32 *)&tahiti_io_mc_regs;
1605 regs_size = TAHITI_IO_MC_REGS_SIZE36;
1606 break;
1607 case CHIP_PITCAIRN:
1608 io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
1609 regs_size = TAHITI_IO_MC_REGS_SIZE36;
1610 break;
1611 case CHIP_VERDE:
1612 default:
1613 io_mc_regs = (u32 *)&verde_io_mc_regs;
1614 regs_size = TAHITI_IO_MC_REGS_SIZE36;
1615 break;
1616 case CHIP_OLAND:
1617 io_mc_regs = (u32 *)&oland_io_mc_regs;
1618 regs_size = TAHITI_IO_MC_REGS_SIZE36;
1619 break;
1620 case CHIP_HAINAN:
1621 io_mc_regs = (u32 *)&hainan_io_mc_regs;
1622 regs_size = TAHITI_IO_MC_REGS_SIZE36;
1623 break;
1624 }
1625 fw_data = (const __be32 *)rdev->mc_fw->data;
1626 }
1627
1628 running = RREG32(MC_SEQ_SUP_CNTL)r100_mm_rreg(rdev, (0x28c8), 0) & RUN_MASK(1 << 0);
1629
1630 if (running == 0) {
1631 /* reset the engine and set to writable */
1632 WREG32(MC_SEQ_SUP_CNTL, 0x00000008)r100_mm_wreg(rdev, (0x28c8), (0x00000008), 0);
1633 WREG32(MC_SEQ_SUP_CNTL, 0x00000010)r100_mm_wreg(rdev, (0x28c8), (0x00000010), 0);
1634
1635 /* load mc io regs */
1636 for (i = 0; i < regs_size; i++) {
1637 if (rdev->new_fw) {
1638 WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++))r100_mm_wreg(rdev, (0x2a44), (((__uint32_t)(*(__uint32_t *)(new_io_mc_regs
++)))), 0)
;
1639 WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++))r100_mm_wreg(rdev, (0x2a48), (((__uint32_t)(*(__uint32_t *)(new_io_mc_regs
++)))), 0)
;
1640 } else {
1641 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)])r100_mm_wreg(rdev, (0x2a44), (io_mc_regs[(i << 1)]), 0);
1642 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1])r100_mm_wreg(rdev, (0x2a48), (io_mc_regs[(i << 1) + 1])
, 0)
;
1643 }
1644 }
1645 /* load the MC ucode */
1646 for (i = 0; i < ucode_size; i++) {
1647 if (rdev->new_fw)
1648 WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++))r100_mm_wreg(rdev, (0x28cc), (((__uint32_t)(*(__uint32_t *)(new_fw_data
++)))), 0)
;
1649 else
1650 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++))r100_mm_wreg(rdev, (0x28cc), ((__uint32_t)(__builtin_constant_p
(*(__uint32_t *)(fw_data++)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff0000) >> 8 | ((__uint32_t)(*(
__uint32_t *)(fw_data++)) & 0xff000000) >> 24) : __swap32md
(*(__uint32_t *)(fw_data++)))), 0)
;
1651 }
1652
1653 /* put the engine back into the active state */
1654 WREG32(MC_SEQ_SUP_CNTL, 0x00000008)r100_mm_wreg(rdev, (0x28c8), (0x00000008), 0);
1655 WREG32(MC_SEQ_SUP_CNTL, 0x00000004)r100_mm_wreg(rdev, (0x28c8), (0x00000004), 0);
1656 WREG32(MC_SEQ_SUP_CNTL, 0x00000001)r100_mm_wreg(rdev, (0x28c8), (0x00000001), 0);
1657
1658 /* wait for training to complete */
1659 for (i = 0; i < rdev->usec_timeout; i++) {
1660 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL)r100_mm_rreg(rdev, (0x28e8), 0) & TRAIN_DONE_D0(1 << 30))
1661 break;
1662 udelay(1);
1663 }
1664 for (i = 0; i < rdev->usec_timeout; i++) {
1665 if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL)r100_mm_rreg(rdev, (0x28e8), 0) & TRAIN_DONE_D1(1 << 31))
1666 break;
1667 udelay(1);
1668 }
1669 }
1670
1671 return 0;
1672}
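In the old-firmware branch of si_mc_load_microcode() above, the [36][2] tables are walked through a flat u32 pointer, so io_mc_regs[(i << 1)] is the register index and io_mc_regs[(i << 1) + 1] the data written to it. A small standalone sketch of that indexing, using a hypothetical three-entry table in the same {index, data} layout (not part of the driver):

#include <inttypes.h>
#include <stdio.h>

typedef uint32_t u32;

/* hypothetical table in the same layout as tahiti_io_mc_regs[] above */
static const u32 demo_io_mc_regs[3][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
};

int main(void)
{
	/* same flattening trick as the CHIP_* cases in si_mc_load_microcode() */
	const u32 *io_mc_regs = (const u32 *)&demo_io_mc_regs;
	int i, regs_size = 3;

	for (i = 0; i < regs_size; i++)
		printf("index 0x%08" PRIx32 " <- data 0x%08" PRIx32 "\n",
		       io_mc_regs[(i << 1)], io_mc_regs[(i << 1) + 1]);
	return 0;
}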
1673
1674static int si_init_microcode(struct radeon_device *rdev)
1675{
1676 const char *chip_name;
1677 const char *new_chip_name;
1678 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
1679 size_t smc_req_size, mc2_req_size;
1680 char fw_name[30];
1681 int err;
1682 int new_fw = 0;
1683 bool_Bool new_smc = false0;
1684 bool_Bool si58_fw = false0;
1685 bool_Bool banks2_fw = false0;
1686
1687 DRM_DEBUG("\n")__drm_dbg(DRM_UT_CORE, "\n");
1688
1689 switch (rdev->family) {
1
Control jumps to 'case CHIP_HAINAN:' at line 1755
1690 case CHIP_TAHITI:
1691 chip_name = "TAHITI";
1692 new_chip_name = "tahiti";
1693 pfp_req_size = SI_PFP_UCODE_SIZE2144 * 4;
1694 me_req_size = SI_PM4_UCODE_SIZE2144 * 4;
1695 ce_req_size = SI_CE_UCODE_SIZE2144 * 4;
1696 rlc_req_size = SI_RLC_UCODE_SIZE2048 * 4;
1697 mc_req_size = SI_MC_UCODE_SIZE7769 * 4;
1698 mc2_req_size = TAHITI_MC_UCODE_SIZE7808 * 4;
1699 smc_req_size = roundup2(TAHITI_SMC_UCODE_SIZE, 4)(((0xf458) + ((4) - 1)) & (~((__typeof(0xf458))(4) - 1)));
1700 break;
1701 case CHIP_PITCAIRN:
1702 chip_name = "PITCAIRN";
1703 if ((rdev->pdev->revision == 0x81) &&
1704 ((rdev->pdev->device == 0x6810) ||
1705 (rdev->pdev->device == 0x6811)))
1706 new_smc = true1;
1707 new_chip_name = "pitcairn";
1708 pfp_req_size = SI_PFP_UCODE_SIZE2144 * 4;
1709 me_req_size = SI_PM4_UCODE_SIZE2144 * 4;
1710 ce_req_size = SI_CE_UCODE_SIZE2144 * 4;
1711 rlc_req_size = SI_RLC_UCODE_SIZE2048 * 4;
1712 mc_req_size = SI_MC_UCODE_SIZE7769 * 4;
1713 mc2_req_size = PITCAIRN_MC_UCODE_SIZE7775 * 4;
1714 smc_req_size = roundup2(PITCAIRN_SMC_UCODE_SIZE, 4)(((0xe9f4) + ((4) - 1)) & (~((__typeof(0xe9f4))(4) - 1)));
1715 break;
1716 case CHIP_VERDE:
1717 chip_name = "VERDE";
1718 if (((rdev->pdev->device == 0x6820) &&
1719 ((rdev->pdev->revision == 0x81) ||
1720 (rdev->pdev->revision == 0x83))) ||
1721 ((rdev->pdev->device == 0x6821) &&
1722 ((rdev->pdev->revision == 0x83) ||
1723 (rdev->pdev->revision == 0x87))) ||
1724 ((rdev->pdev->revision == 0x87) &&
1725 ((rdev->pdev->device == 0x6823) ||
1726 (rdev->pdev->device == 0x682b))))
1727 new_smc = true1;
1728 new_chip_name = "verde";
1729 pfp_req_size = SI_PFP_UCODE_SIZE2144 * 4;
1730 me_req_size = SI_PM4_UCODE_SIZE2144 * 4;
1731 ce_req_size = SI_CE_UCODE_SIZE2144 * 4;
1732 rlc_req_size = SI_RLC_UCODE_SIZE2048 * 4;
1733 mc_req_size = SI_MC_UCODE_SIZE7769 * 4;
1734 mc2_req_size = VERDE_MC_UCODE_SIZE7875 * 4;
1735 smc_req_size = roundup2(VERDE_SMC_UCODE_SIZE, 4)(((0xebe4) + ((4) - 1)) & (~((__typeof(0xebe4))(4) - 1)));
1736 break;
1737 case CHIP_OLAND:
1738 chip_name = "OLAND";
1739 if (((rdev->pdev->revision == 0x81) &&
1740 ((rdev->pdev->device == 0x6600) ||
1741 (rdev->pdev->device == 0x6604) ||
1742 (rdev->pdev->device == 0x6605) ||
1743 (rdev->pdev->device == 0x6610))) ||
1744 ((rdev->pdev->revision == 0x83) &&
1745 (rdev->pdev->device == 0x6610)))
1746 new_smc = true1;
1747 new_chip_name = "oland";
1748 pfp_req_size = SI_PFP_UCODE_SIZE2144 * 4;
1749 me_req_size = SI_PM4_UCODE_SIZE2144 * 4;
1750 ce_req_size = SI_CE_UCODE_SIZE2144 * 4;
1751 rlc_req_size = SI_RLC_UCODE_SIZE2048 * 4;
1752 mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE7863 * 4;
1753 smc_req_size = roundup2(OLAND_SMC_UCODE_SIZE, 4)(((0xe7b4) + ((4) - 1)) & (~((__typeof(0xe7b4))(4) - 1)));
1754 break;
1755 case CHIP_HAINAN:
1756 chip_name = "HAINAN";
1757 if (((rdev->pdev->revision == 0x81) &&
2
Assuming field 'revision' is not equal to 129
1758 (rdev->pdev->device == 0x6660)) ||
1759 ((rdev->pdev->revision == 0x83) &&
3
Assuming field 'revision' is not equal to 131
1760 ((rdev->pdev->device == 0x6660) ||
1761 (rdev->pdev->device == 0x6663) ||
1762 (rdev->pdev->device == 0x6665) ||
1763 (rdev->pdev->device == 0x6667))))
1764 new_smc = true1;
1765 else if ((rdev->pdev->revision == 0xc3) &&
4
Assuming field 'revision' is not equal to 195
1766 (rdev->pdev->device == 0x6665))
1767 banks2_fw = true1;
1768 new_chip_name = "hainan";
1769 pfp_req_size = SI_PFP_UCODE_SIZE2144 * 4;
1770 me_req_size = SI_PM4_UCODE_SIZE2144 * 4;
1771 ce_req_size = SI_CE_UCODE_SIZE2144 * 4;
1772 rlc_req_size = SI_RLC_UCODE_SIZE2048 * 4;
1773 mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE7863 * 4;
1774 smc_req_size = roundup2(HAINAN_SMC_UCODE_SIZE, 4)(((0xe67C) + ((4) - 1)) & (~((__typeof(0xe67C))(4) - 1)));
1775 break;
5
Execution continues on line 1780
1776 default: BUG()do { panic("BUG at %s:%d", "/usr/src/sys/dev/pci/drm/radeon/si.c"
, 1776); } while (0)
;
1777 }
1778
1779 /* this memory configuration requires special firmware */
1780 if (((RREG32(MC_SEQ_MISC0)r100_mm_rreg(rdev, (0x2a00), 0) & 0xff000000) >> 24) == 0x58)
6
Calling 'r100_mm_rreg'
10
Returning from 'r100_mm_rreg'
11
Assuming the condition is false
12
Taking false branch
1781 si58_fw = true1;
1782
1783 DRM_INFO("Loading %s Microcode\n", new_chip_name)printk("\0016" "[" "drm" "] " "Loading %s Microcode\n", new_chip_name
)
;
1784
1785 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
1786 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
13
Calling 'request_firmware'
17
Returning from 'request_firmware'
1787 if (err
17.1
'err' is 0

) {
18
Taking false branch
1788 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1789 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1790 if (err)
1791 goto out;
1792 if (rdev->pfp_fw->size != pfp_req_size) {
1793 pr_err("si_cp: Bogus length %zu in firmware \"%s\"\n",printk("\0013" "si_cp: Bogus length %zu in firmware \"%s\"\n"
, rdev->pfp_fw->size, fw_name)
1794 rdev->pfp_fw->size, fw_name)printk("\0013" "si_cp: Bogus length %zu in firmware \"%s\"\n"
, rdev->pfp_fw->size, fw_name)
;
1795 err = -EINVAL22;
1796 goto out;
1797 }
1798 } else {
1799 err = radeon_ucode_validate(rdev->pfp_fw);
1800 if (err) {
19
Assuming 'err' is 0
20
Taking false branch
1801 pr_err("si_cp: validation failed for firmware \"%s\"\n",printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
1802 fw_name)printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
;
1803 goto out;
1804 } else {
1805 new_fw++;
1806 }
1807 }
1808
1809 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
1810 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
21
Calling 'request_firmware'
25
Returning from 'request_firmware'
1811 if (err
25.1
'err' is 0
) {
26
Taking false branch
1812 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1813 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1814 if (err)
1815 goto out;
1816 if (rdev->me_fw->size != me_req_size) {
1817 pr_err("si_cp: Bogus length %zu in firmware \"%s\"\n",printk("\0013" "si_cp: Bogus length %zu in firmware \"%s\"\n"
, rdev->me_fw->size, fw_name)
1818 rdev->me_fw->size, fw_name)printk("\0013" "si_cp: Bogus length %zu in firmware \"%s\"\n"
, rdev->me_fw->size, fw_name)
;
1819 err = -EINVAL22;
1820 }
1821 } else {
1822 err = radeon_ucode_validate(rdev->me_fw);
1823 if (err) {
27
Assuming 'err' is 0
28
Taking false branch
1824 pr_err("si_cp: validation failed for firmware \"%s\"\n",printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
1825 fw_name)printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
;
1826 goto out;
1827 } else {
1828 new_fw++;
1829 }
1830 }
1831
1832 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
1833 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
29
Calling 'request_firmware'
33
Returning from 'request_firmware'
1834 if (err
33.1
'err' is 0
) {
34
Taking false branch
1835 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
1836 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1837 if (err)
1838 goto out;
1839 if (rdev->ce_fw->size != ce_req_size) {
1840 pr_err("si_cp: Bogus length %zu in firmware \"%s\"\n",printk("\0013" "si_cp: Bogus length %zu in firmware \"%s\"\n"
, rdev->ce_fw->size, fw_name)
1841 rdev->ce_fw->size, fw_name)printk("\0013" "si_cp: Bogus length %zu in firmware \"%s\"\n"
, rdev->ce_fw->size, fw_name)
;
1842 err = -EINVAL22;
1843 }
1844 } else {
1845 err = radeon_ucode_validate(rdev->ce_fw);
1846 if (err) {
35
Assuming 'err' is 0
36
Taking false branch
1847 pr_err("si_cp: validation failed for firmware \"%s\"\n",printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
1848 fw_name)printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
;
1849 goto out;
1850 } else {
1851 new_fw++;
1852 }
1853 }
1854
1855 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
1856 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
37
Calling 'request_firmware'
41
Returning from 'request_firmware'
1857 if (err
41.1
'err' is 0
) {
42
Taking false branch
1858 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
1859 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1860 if (err)
1861 goto out;
1862 if (rdev->rlc_fw->size != rlc_req_size) {
1863 pr_err("si_rlc: Bogus length %zu in firmware \"%s\"\n",printk("\0013" "si_rlc: Bogus length %zu in firmware \"%s\"\n"
, rdev->rlc_fw->size, fw_name)
1864 rdev->rlc_fw->size, fw_name)printk("\0013" "si_rlc: Bogus length %zu in firmware \"%s\"\n"
, rdev->rlc_fw->size, fw_name)
;
1865 err = -EINVAL22;
1866 }
1867 } else {
1868 err = radeon_ucode_validate(rdev->rlc_fw);
1869 if (err) {
43
Assuming 'err' is 0
44
Taking false branch
1870 pr_err("si_cp: validation failed for firmware \"%s\"\n",printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
1871 fw_name)printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
;
1872 goto out;
1873 } else {
1874 new_fw++;
1875 }
1876 }
1877
1878 if (si58_fw
44.1
'si58_fw' is false
)
45
Taking false branch
1879 snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
1880 else
1881 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
1882 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
46
Calling 'request_firmware'
50
Returning from 'request_firmware'
1883 if (err
50.1
'err' is 0
) {
51
Taking false branch
1884 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
1885 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1886 if (err) {
1887 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1888 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1889 if (err)
1890 goto out;
1891 }
1892 if ((rdev->mc_fw->size != mc_req_size) &&
1893 (rdev->mc_fw->size != mc2_req_size)) {
1894 pr_err("si_mc: Bogus length %zu in firmware \"%s\"\n",printk("\0013" "si_mc: Bogus length %zu in firmware \"%s\"\n"
, rdev->mc_fw->size, fw_name)
1895 rdev->mc_fw->size, fw_name)printk("\0013" "si_mc: Bogus length %zu in firmware \"%s\"\n"
, rdev->mc_fw->size, fw_name)
;
1896 err = -EINVAL22;
1897 }
1898 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size)printk("\0016" "[" "drm" "] " "%s: %zu bytes\n", fw_name, rdev
->mc_fw->size)
;
1899 } else {
1900 err = radeon_ucode_validate(rdev->mc_fw);
1901 if (err) {
52
Assuming 'err' is 0
53
Taking false branch
1902 pr_err("si_cp: validation failed for firmware \"%s\"\n",printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
1903 fw_name)printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
;
1904 goto out;
1905 } else {
1906 new_fw++;
1907 }
1908 }
1909
1910 if (banks2_fw
53.1
'banks2_fw' is false
)
54
Taking false branch
1911 snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
1912 else if (new_smc
54.1
'new_smc' is false
)
55
Taking false branch
1913 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
1914 else
1915 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
1916 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1917 if (err) {
56
Assuming 'err' is not equal to 0
57
Taking true branch
1918 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1919 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
58
Calling 'request_firmware'
62
Returning from 'request_firmware'
1920 if (err) {
63
Assuming 'err' is 0
64
Taking false branch
1921 pr_err("smc: error loading firmware \"%s\"\n", fw_name)printk("\0013" "smc: error loading firmware \"%s\"\n", fw_name
)
;
1922 release_firmware(rdev->smc_fw);
1923 rdev->smc_fw = NULL((void *)0);
1924 err = 0;
1925 } else if (rdev->smc_fw->size != smc_req_size) {
65
Access to field 'size' results in a dereference of a null pointer (loaded from field 'smc_fw')
1926 pr_err("si_smc: Bogus length %zu in firmware \"%s\"\n",printk("\0013" "si_smc: Bogus length %zu in firmware \"%s\"\n"
, rdev->smc_fw->size, fw_name)
1927 rdev->smc_fw->size, fw_name)printk("\0013" "si_smc: Bogus length %zu in firmware \"%s\"\n"
, rdev->smc_fw->size, fw_name)
;
1928 err = -EINVAL22;
1929 }
1930 } else {
1931 err = radeon_ucode_validate(rdev->smc_fw);
1932 if (err) {
1933 pr_err("si_cp: validation failed for firmware \"%s\"\n",printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
1934 fw_name)printk("\0013" "si_cp: validation failed for firmware \"%s\"\n"
, fw_name)
;
1935 goto out;
1936 } else {
1937 new_fw++;
1938 }
1939 }
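On the path the analyzer reports (steps 56-65), the fallback request_firmware() call at line 1919 is assumed to return 0 while still leaving rdev->smc_fw NULL, so the length comparison at line 1925 dereferences a null pointer. A minimal sketch of one possible guard, keeping the existing "SMC firmware is optional" behaviour and using only names that already appear in this function (this is not necessarily the fix applied upstream):

	err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
	if (err || !rdev->smc_fw) {
		/* treat a missing blob the same as a failed load: SMC
		 * firmware is optional, so clear the pointer and carry on */
		pr_err("smc: error loading firmware \"%s\"\n", fw_name);
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
		err = 0;
	} else if (rdev->smc_fw->size != smc_req_size) {
		/* the pointer is known to be non-NULL on this branch */
		pr_err("si_smc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->smc_fw->size, fw_name);
		err = -EINVAL;
	}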
1940
1941 if (new_fw == 0) {
1942 rdev->new_fw = false0;
1943 } else if (new_fw < 6) {
1944 pr_err("si_fw: mixing new and old firmware!\n")printk("\0013" "si_fw: mixing new and old firmware!\n");
1945 err = -EINVAL22;
1946 } else {
1947 rdev->new_fw = true1;
1948 }
1949out:
1950 if (err) {
1951 if (err != -EINVAL22)
1952 pr_err("si_cp: Failed to load firmware \"%s\"\n",printk("\0013" "si_cp: Failed to load firmware \"%s\"\n", fw_name
)
1953 fw_name)printk("\0013" "si_cp: Failed to load firmware \"%s\"\n", fw_name
)
;
1954 release_firmware(rdev->pfp_fw);
1955 rdev->pfp_fw = NULL((void *)0);
1956 release_firmware(rdev->me_fw);
1957 rdev->me_fw = NULL((void *)0);
1958 release_firmware(rdev->ce_fw);
1959 rdev->ce_fw = NULL((void *)0);
1960 release_firmware(rdev->rlc_fw);
1961 rdev->rlc_fw = NULL((void *)0);
1962 release_firmware(rdev->mc_fw);
1963 rdev->mc_fw = NULL((void *)0);
1964 release_firmware(rdev->smc_fw);
1965 rdev->smc_fw = NULL((void *)0);
1966 }
1967 return err;
1968}
1969
1970/* watermark setup */
1971static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1972 struct radeon_crtc *radeon_crtc,
1973 struct drm_display_mode *mode,
1974 struct drm_display_mode *other_mode)
1975{
1976 u32 tmp, buffer_alloc, i;
1977 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1978 /*
1979 * Line Buffer Setup
1980 * There are 3 line buffers, each one shared by 2 display controllers.
1981 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1982 * the display controllers. The partitioning is done via one of four
1983 * preset allocations specified in bits 21:20:
1984 * 0 - half lb
1985 * 2 - whole lb, other crtc must be disabled
1986 */
1987 /* this can get tricky if we have two large displays on a paired group
1988 * of crtcs. Ideally for multiple large displays we'd assign them to
1989 * non-linked crtcs for maximum line buffer allocation.
1990 */
1991 if (radeon_crtc->base.enabled && mode) {
1992 if (other_mode) {
1993 tmp = 0; /* 1/2 */
1994 buffer_alloc = 1;
1995 } else {
1996 tmp = 2; /* whole */
1997 buffer_alloc = 2;
1998 }
1999 } else {
2000 tmp = 0;
2001 buffer_alloc = 0;
2002 }
2003
2004 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,r100_mm_wreg(rdev, (0x6b0c + radeon_crtc->crtc_offset), ((
(tmp) << 20)), 0)
2005 DC_LB_MEMORY_CONFIG(tmp))r100_mm_wreg(rdev, (0x6b0c + radeon_crtc->crtc_offset), ((
(tmp) << 20)), 0)
;
2006
2007 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,r100_mm_wreg(rdev, (0x0ca0 + pipe_offset), (((buffer_alloc) <<
0)), 0)
2008 DMIF_BUFFERS_ALLOCATED(buffer_alloc))r100_mm_wreg(rdev, (0x0ca0 + pipe_offset), (((buffer_alloc) <<
0)), 0)
;
2009 for (i = 0; i < rdev->usec_timeout; i++) {
2010 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset)r100_mm_rreg(rdev, (0x0ca0 + pipe_offset), 0) &
2011 DMIF_BUFFERS_ALLOCATED_COMPLETED(1 << 4))
2012 break;
2013 udelay(1);
2014 }
2015
2016 if (radeon_crtc->base.enabled && mode) {
2017 switch (tmp) {
2018 case 0:
2019 default:
2020 return 4096 * 2;
2021 case 2:
2022 return 8192 * 2;
2023 }
2024 }
2025
2026 /* controller not enabled, so no lb used */
2027 return 0;
2028}
2029
2030static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
2031{
2032 u32 tmp = RREG32(MC_SHARED_CHMAP)r100_mm_rreg(rdev, (0x2004), 0);
2033
2034 switch ((tmp & NOOFCHAN_MASK0x0000f000) >> NOOFCHAN_SHIFT12) {
2035 case 0:
2036 default:
2037 return 1;
2038 case 1:
2039 return 2;
2040 case 2:
2041 return 4;
2042 case 3:
2043 return 8;
2044 case 4:
2045 return 3;
2046 case 5:
2047 return 6;
2048 case 6:
2049 return 10;
2050 case 7:
2051 return 12;
2052 case 8:
2053 return 16;
2054 }
2055}
2056
2057struct dce6_wm_params {
2058 u32 dram_channels; /* number of dram channels */
2059 u32 yclk; /* bandwidth per dram data pin in kHz */
2060 u32 sclk; /* engine clock in kHz */
2061 u32 disp_clk; /* display clock in kHz */
2062 u32 src_width; /* viewport width */
2063 u32 active_time; /* active display time in ns */
2064 u32 blank_time; /* blank time in ns */
2065 bool_Bool interlaced; /* mode is interlaced */
2066 fixed20_12 vsc; /* vertical scale ratio */
2067 u32 num_heads; /* number of active crtcs */
2068 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
2069 u32 lb_size; /* line buffer allocated to pipe */
2070 u32 vtaps; /* vertical scaler taps */
2071};
2072
2073static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
2074{
2075 /* Calculate raw DRAM Bandwidth */
2076 fixed20_12 dram_efficiency; /* 0.7 */
2077 fixed20_12 yclk, dram_channels, bandwidth;
2078 fixed20_12 a;
2079
2080 a.full = dfixed_const(1000)(u32)(((1000) << 12));
2081 yclk.full = dfixed_const(wm->yclk)(u32)(((wm->yclk) << 12));
2082 yclk.full = dfixed_div(yclk, a);
2083 dram_channels.full = dfixed_const(wm->dram_channels * 4)(u32)(((wm->dram_channels * 4) << 12));
2084 a.full = dfixed_const(10)(u32)(((10) << 12));
2085 dram_efficiency.full = dfixed_const(7)(u32)(((7) << 12));
2086 dram_efficiency.full = dfixed_div(dram_efficiency, a);
2087 bandwidth.full = dfixed_mul(dram_channels, yclk)((u64)((u64)(dram_channels).full * (yclk).full + 2048) >>
12)
;
2088 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency)((u64)((u64)(bandwidth).full * (dram_efficiency).full + 2048)
>> 12)
;
2089
2090 return dfixed_trunc(bandwidth)((bandwidth).full >> 12);
2091}
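The bandwidth helpers above and below all work in the driver's 20.12 fixed-point format; the macro expansions shown in the listing (dfixed_const, dfixed_mul, dfixed_trunc) make the arithmetic explicit. A small standalone sketch of the same raw-DRAM-bandwidth computation follows, with a plain shifted division standing in for dfixed_div (whose expansion is not shown here, so its exact rounding is an assumption) and with made-up input values:

#include <inttypes.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* 20.12 fixed point, mirroring the expansions visible in the listing:
 * dfixed_const(A)  -> (u32)((A) << 12)
 * dfixed_mul(A, B) -> (u32)(((u64)A.full * B.full + 2048) >> 12)
 * dfixed_trunc(A)  -> A.full >> 12
 */
static u32 fx_const(u32 a) { return a << 12; }
static u32 fx_mul(u32 a, u32 b) { return (u32)(((u64)a * b + 2048) >> 12); }
static u32 fx_div(u32 a, u32 b) { return (u32)(((u64)a << 12) / b); } /* assumed rounding */
static u32 fx_trunc(u32 a) { return a >> 12; }

int main(void)
{
	/* made-up inputs: yclk = 800000 kHz, 2 DRAM channels, 0.7 efficiency */
	u32 yclk = fx_div(fx_const(800000), fx_const(1000));
	u32 chan = fx_const(2 * 4);
	u32 eff  = fx_div(fx_const(7), fx_const(10));
	u32 bw   = fx_mul(fx_mul(chan, yclk), eff);

	printf("raw dram bandwidth ~ %" PRIu32 "\n", fx_trunc(bw)); /* about 4480 */
	return 0;
}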
2092
2093static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2094{
2095 /* Calculate DRAM Bandwidth and the part allocated to display. */
2096 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
2097 fixed20_12 yclk, dram_channels, bandwidth;
2098 fixed20_12 a;
2099
2100 a.full = dfixed_const(1000)(u32)(((1000) << 12));
2101 yclk.full = dfixed_const(wm->yclk)(u32)(((wm->yclk) << 12));
2102 yclk.full = dfixed_div(yclk, a);
2103 dram_channels.full = dfixed_const(wm->dram_channels * 4)(u32)(((wm->dram_channels * 4) << 12));
2104 a.full = dfixed_const(10)(u32)(((10) << 12));
2105 disp_dram_allocation.full = dfixed_const(3)(u32)(((3) << 12)); /* XXX worst case value 0.3 */
2106 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
2107 bandwidth.full = dfixed_mul(dram_channels, yclk)((u64)((u64)(dram_channels).full * (yclk).full + 2048) >>
12)
;
2108 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation)((u64)((u64)(bandwidth).full * (disp_dram_allocation).full + 2048
) >> 12)
;
2109
2110 return dfixed_trunc(bandwidth)((bandwidth).full >> 12);
2111}
2112
2113static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
2114{
2115 /* Calculate the display Data return Bandwidth */
2116 fixed20_12 return_efficiency; /* 0.8 */
2117 fixed20_12 sclk, bandwidth;
2118 fixed20_12 a;
2119
2120 a.full = dfixed_const(1000)(u32)(((1000) << 12));
2121 sclk.full = dfixed_const(wm->sclk)(u32)(((wm->sclk) << 12));
2122 sclk.full = dfixed_div(sclk, a);
2123 a.full = dfixed_const(10)(u32)(((10) << 12));
2124 return_efficiency.full = dfixed_const(8)(u32)(((8) << 12));
2125 return_efficiency.full = dfixed_div(return_efficiency, a);
2126 a.full = dfixed_const(32)(u32)(((32) << 12));
2127 bandwidth.full = dfixed_mul(a, sclk)((u64)((u64)(a).full * (sclk).full + 2048) >> 12);
2128 bandwidth.full = dfixed_mul(bandwidth, return_efficiency)((u64)((u64)(bandwidth).full * (return_efficiency).full + 2048
) >> 12)
;
2129
2130 return dfixed_trunc(bandwidth)((bandwidth).full >> 12);
2131}
2132
2133static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
2134{
2135 return 32;
2136}
2137
2138static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
2139{
2140 /* Calculate the DMIF Request Bandwidth */
2141 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
2142 fixed20_12 disp_clk, sclk, bandwidth;
2143 fixed20_12 a, b1, b2;
2144 u32 min_bandwidth;
2145
2146 a.full = dfixed_const(1000)(u32)(((1000) << 12));
2147 disp_clk.full = dfixed_const(wm->disp_clk)(u32)(((wm->disp_clk) << 12));
2148 disp_clk.full = dfixed_div(disp_clk, a);
2149 a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2)(u32)(((dce6_get_dmif_bytes_per_request(wm) / 2) << 12)
)
;
2150 b1.full = dfixed_mul(a, disp_clk)((u64)((u64)(a).full * (disp_clk).full + 2048) >> 12);
2151
2152 a.full = dfixed_const(1000)(u32)(((1000) << 12));
2153 sclk.full = dfixed_const(wm->sclk)(u32)(((wm->sclk) << 12));
2154 sclk.full = dfixed_div(sclk, a);
2155 a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm))(u32)(((dce6_get_dmif_bytes_per_request(wm)) << 12));
2156 b2.full = dfixed_mul(a, sclk)((u64)((u64)(a).full * (sclk).full + 2048) >> 12);
2157
2158 a.full = dfixed_const(10)(u32)(((10) << 12));
2159 disp_clk_request_efficiency.full = dfixed_const(8)(u32)(((8) << 12));
2160 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
2161
2162 min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2))(((((b1).full >> 12))<(((b2).full >> 12)))?(((
b1).full >> 12)):(((b2).full >> 12)))
;
2163
2164 a.full = dfixed_const(min_bandwidth)(u32)(((min_bandwidth) << 12));
2165 bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency)((u64)((u64)(a).full * (disp_clk_request_efficiency).full + 2048
) >> 12)
;
2166
2167 return dfixed_trunc(bandwidth)((bandwidth).full >> 12);
2168}
2169
2170static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
2171{
2172 /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
2173 u32 dram_bandwidth = dce6_dram_bandwidth(wm);
2174 u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
2175 u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
2176
2177 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth))(((dram_bandwidth)<((((data_return_bandwidth)<(dmif_req_bandwidth
))?(data_return_bandwidth):(dmif_req_bandwidth))))?(dram_bandwidth
):((((data_return_bandwidth)<(dmif_req_bandwidth))?(data_return_bandwidth
):(dmif_req_bandwidth))))
;
2178}
2179
2180static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
2181{
2182 /* Calculate the display mode Average Bandwidth
2183 * DisplayMode should contain the source and destination dimensions,
2184 * timing, etc.
2185 */
2186 fixed20_12 bpp;
2187 fixed20_12 line_time;
2188 fixed20_12 src_width;
2189 fixed20_12 bandwidth;
2190 fixed20_12 a;
2191
2192 a.full = dfixed_const(1000)(u32)(((1000) << 12));
2193 line_time.full = dfixed_const(wm->active_time + wm->blank_time)(u32)(((wm->active_time + wm->blank_time) << 12));
2194 line_time.full = dfixed_div(line_time, a);
2195 bpp.full = dfixed_const(wm->bytes_per_pixel)(u32)(((wm->bytes_per_pixel) << 12));
2196 src_width.full = dfixed_const(wm->src_width)(u32)(((wm->src_width) << 12));
2197 bandwidth.full = dfixed_mul(src_width, bpp)((u64)((u64)(src_width).full * (bpp).full + 2048) >> 12
)
;
2198 bandwidth.full = dfixed_mul(bandwidth, wm->vsc)((u64)((u64)(bandwidth).full * (wm->vsc).full + 2048) >>
12)
;
2199 bandwidth.full = dfixed_div(bandwidth, line_time);
2200
2201 return dfixed_trunc(bandwidth)((bandwidth).full >> 12);
2202}
2203
2204static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
2205{
2206 /* First calculate the latency in ns */
2207 u32 mc_latency = 2000; /* 2000 ns. */
2208 u32 available_bandwidth = dce6_available_bandwidth(wm);
2209 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
2210 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
2211 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
2212 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
2213 (wm->num_heads * cursor_line_pair_return_time);
2214 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
2215 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
2216 u32 tmp, dmif_size = 12288;
2217 fixed20_12 a, b, c;
2218
2219 if (wm->num_heads == 0)
2220 return 0;
2221
2222 a.full = dfixed_const(2)(u32)(((2) << 12));
2223 b.full = dfixed_const(1)(u32)(((1) << 12));
2224 if ((wm->vsc.full > a.full) ||
2225 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
2226 (wm->vtaps >= 5) ||
2227 ((wm->vsc.full >= a.full) && wm->interlaced))
2228 max_src_lines_per_dst_line = 4;
2229 else
2230 max_src_lines_per_dst_line = 2;
2231
2232 a.full = dfixed_const(available_bandwidth)(u32)(((available_bandwidth) << 12));
2233 b.full = dfixed_const(wm->num_heads)(u32)(((wm->num_heads) << 12));
2234 a.full = dfixed_div(a, b);
2235 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
2236 tmp = min(dfixed_trunc(a), tmp)(((((a).full >> 12))<(tmp))?(((a).full >> 12))
:(tmp))
;
2237
2238 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000)(((tmp)<(wm->disp_clk * wm->bytes_per_pixel / 1000))
?(tmp):(wm->disp_clk * wm->bytes_per_pixel / 1000))
;
2239
2240 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel)(u32)(((max_src_lines_per_dst_line * wm->src_width * wm->
bytes_per_pixel) << 12))
;
2241 b.full = dfixed_const(1000)(u32)(((1000) << 12));
2242 c.full = dfixed_const(lb_fill_bw)(u32)(((lb_fill_bw) << 12));
2243 b.full = dfixed_div(c, b);
2244 a.full = dfixed_div(a, b);
2245 line_fill_time = dfixed_trunc(a)((a).full >> 12);
2246
2247 if (line_fill_time < wm->active_time)
2248 return latency;
2249 else
2250 return latency + (line_fill_time - wm->active_time);
2251
2252}
2253
2254static bool_Bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2255{
2256 if (dce6_average_bandwidth(wm) <=
2257 (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
2258 return true1;
2259 else
2260 return false0;
2261};
2262
2263static bool_Bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
2264{
2265 if (dce6_average_bandwidth(wm) <=
2266 (dce6_available_bandwidth(wm) / wm->num_heads))
2267 return true1;
2268 else
2269 return false0;
2270};
2271
2272static bool_Bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
2273{
2274 u32 lb_partitions = wm->lb_size / wm->src_width;
2275 u32 line_time = wm->active_time + wm->blank_time;
2276 u32 latency_tolerant_lines;
2277 u32 latency_hiding;
2278 fixed20_12 a;
2279
2280 a.full = dfixed_const(1)(u32)(((1) << 12));
2281 if (wm->vsc.full > a.full)
2282 latency_tolerant_lines = 1;
2283 else {
2284 if (lb_partitions <= (wm->vtaps + 1))
2285 latency_tolerant_lines = 1;
2286 else
2287 latency_tolerant_lines = 2;
2288 }
2289
2290 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
2291
2292 if (dce6_latency_watermark(wm) <= latency_hiding)
2293 return true1;
2294 else
2295 return false0;
2296}
2297
2298static void dce6_program_watermarks(struct radeon_device *rdev,
2299 struct radeon_crtc *radeon_crtc,
2300 u32 lb_size, u32 num_heads)
2301{
2302 struct drm_display_mode *mode = &radeon_crtc->base.mode;
2303 struct dce6_wm_params wm_low, wm_high;
2304 u32 dram_channels;
2305 u32 active_time;
2306 u32 line_time = 0;
2307 u32 latency_watermark_a = 0, latency_watermark_b = 0;
2308 u32 priority_a_mark = 0, priority_b_mark = 0;
2309 u32 priority_a_cnt = PRIORITY_OFF(1 << 16);
2310 u32 priority_b_cnt = PRIORITY_OFF(1 << 16);
2311 u32 tmp, arb_control3;
2312 fixed20_12 a, b, c;
2313
2314 if (radeon_crtc->base.enabled && num_heads && mode) {
2315 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
2316 (u32)mode->clock);
2317 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
2318 (u32)mode->clock);
2319 line_time = min(line_time, (u32)65535)(((line_time)<((u32)65535))?(line_time):((u32)65535));
2320 priority_a_cnt = 0;
2321 priority_b_cnt = 0;
2322
2323 if (rdev->family == CHIP_ARUBA)
2324 dram_channels = evergreen_get_number_of_dram_channels(rdev);
2325 else
2326 dram_channels = si_get_number_of_dram_channels(rdev);
2327
2328 /* watermark for high clocks */
2329 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2330 wm_high.yclk =
2331 radeon_dpm_get_mclk(rdev, false)rdev->asic->dpm.get_mclk((rdev), (0)) * 10;
2332 wm_high.sclk =
2333 radeon_dpm_get_sclk(rdev, false)rdev->asic->dpm.get_sclk((rdev), (0)) * 10;
2334 } else {
2335 wm_high.yclk = rdev->pm.current_mclk * 10;
2336 wm_high.sclk = rdev->pm.current_sclk * 10;
2337 }
2338
2339 wm_high.disp_clk = mode->clock;
2340 wm_high.src_width = mode->crtc_hdisplay;
2341 wm_high.active_time = active_time;
2342 wm_high.blank_time = line_time - wm_high.active_time;
2343 wm_high.interlaced = false0;
2344 if (mode->flags & DRM_MODE_FLAG_INTERLACE(1<<4))
2345 wm_high.interlaced = true1;
2346 wm_high.vsc = radeon_crtc->vsc;
2347 wm_high.vtaps = 1;
2348 if (radeon_crtc->rmx_type != RMX_OFF)
2349 wm_high.vtaps = 2;
2350 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
2351 wm_high.lb_size = lb_size;
2352 wm_high.dram_channels = dram_channels;
2353 wm_high.num_heads = num_heads;
2354
2355 /* watermark for low clocks */
2356 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2357 wm_low.yclk =
2358 radeon_dpm_get_mclk(rdev, true)rdev->asic->dpm.get_mclk((rdev), (1)) * 10;
2359 wm_low.sclk =
2360 radeon_dpm_get_sclk(rdev, true)rdev->asic->dpm.get_sclk((rdev), (1)) * 10;
2361 } else {
2362 wm_low.yclk = rdev->pm.current_mclk * 10;
2363 wm_low.sclk = rdev->pm.current_sclk * 10;
2364 }
2365
2366 wm_low.disp_clk = mode->clock;
2367 wm_low.src_width = mode->crtc_hdisplay;
2368 wm_low.active_time = active_time;
2369 wm_low.blank_time = line_time - wm_low.active_time;
2370 wm_low.interlaced = false0;
2371 if (mode->flags & DRM_MODE_FLAG_INTERLACE(1<<4))
2372 wm_low.interlaced = true1;
2373 wm_low.vsc = radeon_crtc->vsc;
2374 wm_low.vtaps = 1;
2375 if (radeon_crtc->rmx_type != RMX_OFF)
2376 wm_low.vtaps = 2;
2377 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
2378 wm_low.lb_size = lb_size;
2379 wm_low.dram_channels = dram_channels;
2380 wm_low.num_heads = num_heads;
2381
2382 /* set for high clocks */
2383 latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535)(((dce6_latency_watermark(&wm_high))<((u32)65535))?(dce6_latency_watermark
(&wm_high)):((u32)65535))
;
2384 /* set for low clocks */
2385 latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535)(((dce6_latency_watermark(&wm_low))<((u32)65535))?(dce6_latency_watermark
(&wm_low)):((u32)65535))
;
2386
2387 /* possibly force display priority to high */
2388 /* should really do this at mode validation time... */
2389 if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
2390 !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
2391 !dce6_check_latency_hiding(&wm_high) ||
2392 (rdev->disp_priority == 2)) {
2393 DRM_DEBUG_KMS("force priority to high\n")__drm_dbg(DRM_UT_KMS, "force priority to high\n");
2394 priority_a_cnt |= PRIORITY_ALWAYS_ON(1 << 20);
2395 priority_b_cnt |= PRIORITY_ALWAYS_ON(1 << 20);
2396 }
2397 if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
2398 !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
2399 !dce6_check_latency_hiding(&wm_low) ||
2400 (rdev->disp_priority == 2)) {
2401 DRM_DEBUG_KMS("force priority to high\n")__drm_dbg(DRM_UT_KMS, "force priority to high\n");
2402 priority_a_cnt |= PRIORITY_ALWAYS_ON(1 << 20);
2403 priority_b_cnt |= PRIORITY_ALWAYS_ON(1 << 20);
2404 }
2405
2406 a.full = dfixed_const(1000)(u32)(((1000) << 12));
2407 b.full = dfixed_const(mode->clock)(u32)(((mode->clock) << 12));
2408 b.full = dfixed_div(b, a);
2409 c.full = dfixed_const(latency_watermark_a)(u32)(((latency_watermark_a) << 12));
2410 c.full = dfixed_mul(c, b)((u64)((u64)(c).full * (b).full + 2048) >> 12);
2411 c.full = dfixed_mul(c, radeon_crtc->hsc)((u64)((u64)(c).full * (radeon_crtc->hsc).full + 2048) >>
12)
;
2412 c.full = dfixed_div(c, a);
2413 a.full = dfixed_const(16)(u32)(((16) << 12));
2414 c.full = dfixed_div(c, a);
2415 priority_a_mark = dfixed_trunc(c)((c).full >> 12);
2416 priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK0x7fff;
2417
2418 a.full = dfixed_const(1000)(u32)(((1000) << 12));
2419 b.full = dfixed_const(mode->clock)(u32)(((mode->clock) << 12));
2420 b.full = dfixed_div(b, a);
2421 c.full = dfixed_const(latency_watermark_b)(u32)(((latency_watermark_b) << 12));
2422 c.full = dfixed_mul(c, b)((u64)((u64)(c).full * (b).full + 2048) >> 12);
2423 c.full = dfixed_mul(c, radeon_crtc->hsc)((u64)((u64)(c).full * (radeon_crtc->hsc).full + 2048) >>
12)
;
2424 c.full = dfixed_div(c, a);
2425 a.full = dfixed_const(16)(u32)(((16) << 12));
2426 c.full = dfixed_div(c, a);
2427 priority_b_mark = dfixed_trunc(c)((c).full >> 12);
2428 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK0x7fff;
2429
2430 /* Save number of lines the linebuffer leads before the scanout */
2431 radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay)(((lb_size) + ((mode->crtc_hdisplay) - 1)) / (mode->crtc_hdisplay
))
;
2432 }
2433
2434 /* select wm A */
2435 arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset)r100_mm_rreg(rdev, (0x6cc8 + radeon_crtc->crtc_offset), 0);
2436 tmp = arb_control3;
2437 tmp &= ~LATENCY_WATERMARK_MASK(3)((3) << 16);
2438 tmp |= LATENCY_WATERMARK_MASK(1)((1) << 16);
2439 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp)r100_mm_wreg(rdev, (0x6cc8 + radeon_crtc->crtc_offset), (tmp
), 0)
;
2440 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,r100_mm_wreg(rdev, (0x6ccc + radeon_crtc->crtc_offset), ((
((latency_watermark_a) << 0) | ((line_time) << 16
))), 0)
2441 (LATENCY_LOW_WATERMARK(latency_watermark_a) |r100_mm_wreg(rdev, (0x6ccc + radeon_crtc->crtc_offset), ((
((latency_watermark_a) << 0) | ((line_time) << 16
))), 0)
2442 LATENCY_HIGH_WATERMARK(line_time)))r100_mm_wreg(rdev, (0x6ccc + radeon_crtc->crtc_offset), ((
((latency_watermark_a) << 0) | ((line_time) << 16
))), 0)
;
2443 /* select wm B */
2444 tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset)r100_mm_rreg(rdev, (0x6cc8 + radeon_crtc->crtc_offset), 0);
2445 tmp &= ~LATENCY_WATERMARK_MASK(3)((3) << 16);
2446 tmp |= LATENCY_WATERMARK_MASK(2)((2) << 16);
2447 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp)r100_mm_wreg(rdev, (0x6cc8 + radeon_crtc->crtc_offset), (tmp
), 0)
;
2448 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,r100_mm_wreg(rdev, (0x6ccc + radeon_crtc->crtc_offset), ((
((latency_watermark_b) << 0) | ((line_time) << 16
))), 0)
2449 (LATENCY_LOW_WATERMARK(latency_watermark_b) |r100_mm_wreg(rdev, (0x6ccc + radeon_crtc->crtc_offset), ((
((latency_watermark_b) << 0) | ((line_time) << 16
))), 0)
2450 LATENCY_HIGH_WATERMARK(line_time)))r100_mm_wreg(rdev, (0x6ccc + radeon_crtc->crtc_offset), ((
((latency_watermark_b) << 0) | ((line_time) << 16
))), 0)
;
2451 /* restore original selection */
2452 WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3)r100_mm_wreg(rdev, (0x6cc8 + radeon_crtc->crtc_offset), (arb_control3
), 0)
;
2453
2454 /* write the priority marks */
2455 WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt)r100_mm_wreg(rdev, (0x6b18 + radeon_crtc->crtc_offset), (priority_a_cnt
), 0)
;
2456 WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt)r100_mm_wreg(rdev, (0x6b1c + radeon_crtc->crtc_offset), (priority_b_cnt
), 0)
;
2457
2458 /* save values for DPM */
2459 radeon_crtc->line_time = line_time;
2460 radeon_crtc->wm_high = latency_watermark_a;
2461 radeon_crtc->wm_low = latency_watermark_b;
2462}
2463
2464void dce6_bandwidth_update(struct radeon_device *rdev)
2465{
2466 struct drm_display_mode *mode0 = NULL((void *)0);
2467 struct drm_display_mode *mode1 = NULL((void *)0);
2468 u32 num_heads = 0, lb_size;
2469 int i;
2470
2471 if (!rdev->mode_info.mode_config_initialized)
2472 return;
2473
2474 radeon_update_display_priority(rdev);
2475
2476 for (i = 0; i < rdev->num_crtc; i++) {
2477 if (rdev->mode_info.crtcs[i]->base.enabled)
2478 num_heads++;
2479 }
2480 for (i = 0; i < rdev->num_crtc; i += 2) {
2481 mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2482 mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2483 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2484 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2485 lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2486 dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2487 }
2488}
2489
2490/*
2491 * Core functions
2492 */
2493static void si_tiling_mode_table_init(struct radeon_device *rdev)
2494{
2495 u32 *tile = rdev->config.si.tile_mode_array;
2496 const u32 num_tile_mode_states =
2497 ARRAY_SIZE(rdev->config.si.tile_mode_array)(sizeof((rdev->config.si.tile_mode_array)) / sizeof((rdev->
config.si.tile_mode_array)[0]))
;
2498 u32 reg_offset, split_equal_to_row_size;
2499
2500 switch (rdev->config.si.mem_row_size_in_kb) {
2501 case 1:
2502 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB4;
2503 break;
2504 case 2:
2505 default:
2506 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB5;
2507 break;
2508 case 4:
2509 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB6;
2510 break;
2511 }
2512
2513 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2514 tile[reg_offset] = 0;
2515
2516 switch(rdev->family) {
2517 case CHIP_TAHITI:
2518 case CHIP_PITCAIRN:
2519 /* non-AA compressed depth or any compressed stencil */
2520 tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2521 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2522 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2523 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)((0) << 11) |
2524 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2525 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2526 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2527 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2528 /* 2xAA/4xAA compressed depth only */
2529 tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2530 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2531 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2532 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)((1) << 11) |
2533 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2534 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2535 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2536 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2537 /* 8xAA compressed depth only */
2538 tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2539 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2540 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2541 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2542 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2543 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2544 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2545 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2546 /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2547 tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2548 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2549 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2550 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)((1) << 11) |
2551 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2552 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2553 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2554 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2555 /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2556 tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1)((2) << 2) |
2557 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2558 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2559 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)((0) << 11) |
2560 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2561 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2562 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2563 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2564 /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2565 tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2566 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2567 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2568 TILE_SPLIT(split_equal_to_row_size)((split_equal_to_row_size) << 11) |
2569 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2570 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2571 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2572 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2573 /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2574 tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2575 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2576 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2577 TILE_SPLIT(split_equal_to_row_size)((split_equal_to_row_size) << 11) |
2578 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2579 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2580 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2581 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)((0) << 18));
2582 /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2583 tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2584 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2585 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2586 TILE_SPLIT(split_equal_to_row_size)((split_equal_to_row_size) << 11) |
2587 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2588 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2589 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2590 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2591 /* 1D and 1D Array Surfaces */
2592 tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED)((1) << 2) |
2593 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING)((0) << 0) |
2594 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2595 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)((0) << 11) |
2596 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2597 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2598 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2599 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2600 /* Displayable maps. */
2601 tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1)((2) << 2) |
2602 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING)((0) << 0) |
2603 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2604 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)((0) << 11) |
2605 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2606 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2607 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2608 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2609 /* Display 8bpp. */
2610 tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2611 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING)((0) << 0) |
2612 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2613 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2614 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2615 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2616 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2617 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2618 /* Display 16bpp. */
2619 tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2620 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING)((0) << 0) |
2621 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2622 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2623 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2624 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2625 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2626 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2627 /* Display 32bpp. */
2628 tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2629 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING)((0) << 0) |
2630 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2631 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)((3) << 11) |
2632 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2633 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2634 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2635 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)((0) << 18));
2636 /* Thin. */
2637 tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1)((2) << 2) |
2638 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2639 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2640 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)((0) << 11) |
2641 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2642 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2643 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2644 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2645 /* Thin 8 bpp. */
2646 tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2647 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2648 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2649 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2650 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2651 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2652 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2653 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)((0) << 18));
2654 /* Thin 16 bpp. */
2655 tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2656 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2657 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2658 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2659 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2660 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2661 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2662 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)((0) << 18));
2663 /* Thin 32 bpp. */
2664 tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2665 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2666 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2667 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)((3) << 11) |
2668 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2669 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2670 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2671 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)((0) << 18));
2672 /* Thin 64 bpp. */
2673 tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2674 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2675 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2676 TILE_SPLIT(split_equal_to_row_size)((split_equal_to_row_size) << 11) |
2677 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2678 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2679 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2680 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)((0) << 18));
2681 /* 8 bpp PRT. */
2682 tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2683 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2684 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2685 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2686 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2687 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2)((1) << 14) |
2688 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2689 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2690 /* 16 bpp PRT */
2691 tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2692 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2693 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2694 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2695 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2696 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2697 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2698 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)((2) << 18));
2699 /* 32 bpp PRT */
2700 tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2701 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2702 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2703 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2704 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2705 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2706 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2707 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2708 /* 64 bpp PRT */
2709 tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2710 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2711 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2712 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)((3) << 11) |
2713 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2714 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2715 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2716 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2717 /* 128 bpp PRT */
2718 tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2719 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2720 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2721 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB)((4) << 11) |
2722 NUM_BANKS(ADDR_SURF_8_BANK)((2) << 20) |
2723 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2724 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2725 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)((0) << 18));
2726
2727 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2728 WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset])r100_mm_wreg(rdev, (0x9910 + (reg_offset * 4)), (tile[reg_offset
]), 0)
;
2729 break;
2730
2731 case CHIP_VERDE:
2732 case CHIP_OLAND:
2733 case CHIP_HAINAN:
2734 /* non-AA compressed depth or any compressed stencil */
2735 tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2736 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2737 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2738 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)((0) << 11) |
2739 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2740 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2741 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2742 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)((2) << 18));
2743 /* 2xAA/4xAA compressed depth only */
2744 tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2745 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2746 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2747 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)((1) << 11) |
2748 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2749 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2750 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2751 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)((2) << 18));
2752 /* 8xAA compressed depth only */
2753 tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2754 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2755 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2756 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2757 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2758 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2759 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2760 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)((2) << 18));
2761 /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2762 tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2763 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2764 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2765 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)((1) << 11) |
2766 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2767 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2768 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2769 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)((2) << 18));
2770 /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2771 tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1)((2) << 2) |
2772 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2773 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2774 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)((0) << 11) |
2775 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2776 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2777 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2778 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2779 /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2780 tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2781 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2782 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2783 TILE_SPLIT(split_equal_to_row_size)((split_equal_to_row_size) << 11) |
2784 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2785 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2786 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2787 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2788 /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2789 tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2790 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2791 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2792 TILE_SPLIT(split_equal_to_row_size)((split_equal_to_row_size) << 11) |
2793 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2794 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2795 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2796 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2797 /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2798 tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2799 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING)((2) << 0) |
2800 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2801 TILE_SPLIT(split_equal_to_row_size)((split_equal_to_row_size) << 11) |
2802 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2803 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2804 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2805 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)((2) << 18));
2806 /* 1D and 1D Array Surfaces */
2807 tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED)((1) << 2) |
2808 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING)((0) << 0) |
2809 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2810 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)((0) << 11) |
2811 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2812 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2813 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2814 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2815 /* Displayable maps. */
2816 tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1)((2) << 2) |
2817 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING)((0) << 0) |
2818 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2819 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)((0) << 11) |
2820 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2821 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2822 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2823 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2824 /* Display 8bpp. */
2825 tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2826 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING)((0) << 0) |
2827 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2828 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2829 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2830 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2831 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2832 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)((2) << 18));
2833 /* Display 16bpp. */
2834 tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2835 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING)((0) << 0) |
2836 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2837 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2838 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2839 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2840 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2841 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2842 /* Display 32bpp. */
2843 tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2844 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING)((0) << 0) |
2845 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2846 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)((3) << 11) |
2847 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2848 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2849 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2850 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2851 /* Thin. */
2852 tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1)((2) << 2) |
2853 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2854 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2855 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)((0) << 11) |
2856 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2857 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2858 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2859 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2860 /* Thin 8 bpp. */
2861 tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2862 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2863 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2864 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2865 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2866 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2867 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2868 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2869 /* Thin 16 bpp. */
2870 tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2871 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2872 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2873 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2874 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2875 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2876 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2877 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2878 /* Thin 32 bpp. */
2879 tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2880 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2881 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2882 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)((3) << 11) |
2883 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2884 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2885 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2886 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2887 /* Thin 64 bpp. */
2888 tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2889 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2890 PIPE_CONFIG(ADDR_SURF_P4_8x16)((4) << 6) |
2891 TILE_SPLIT(split_equal_to_row_size)((split_equal_to_row_size) << 11) |
2892 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2893 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2894 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2895 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2896 /* 8 bpp PRT. */
2897 tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2898 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2899 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2900 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2901 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2902 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2)((1) << 14) |
2903 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2904 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2905 /* 16 bpp PRT */
2906 tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2907 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2908 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2909 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2910 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2911 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2912 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4)((2) << 16) |
2913 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)((2) << 18));
2914 /* 32 bpp PRT */
2915 tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2916 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2917 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2918 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)((2) << 11) |
2919 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2920 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2921 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2)((1) << 16) |
2922 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2923 /* 64 bpp PRT */
2924 tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2925 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2926 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2927 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)((3) << 11) |
2928 NUM_BANKS(ADDR_SURF_16_BANK)((3) << 20) |
2929 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2930 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2931 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)((1) << 18));
2932 /* 128 bpp PRT */
2933 tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1)((4) << 2) |
2934 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING)((1) << 0) |
2935 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16)((10) << 6) |
2936 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB)((4) << 11) |
2937 NUM_BANKS(ADDR_SURF_8_BANK)((2) << 20) |
2938 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1)((0) << 14) |
2939 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1)((0) << 16) |
2940 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)((0) << 18));
2941
2942 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2943 WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset])r100_mm_wreg(rdev, (0x9910 + (reg_offset * 4)), (tile[reg_offset]), 0);
2944 break;
2945
2946 default:
2947 DRM_ERROR("unknown asic: 0x%x\n", rdev->family)__drm_err("unknown asic: 0x%x\n", rdev->family);
2948 }
2949}
2950
2951static void si_select_se_sh(struct radeon_device *rdev,
2952 u32 se_num, u32 sh_num)
2953{
2954 u32 data = INSTANCE_BROADCAST_WRITES(1 << 30);
2955
2956 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
2957 data |= SH_BROADCAST_WRITES(1 << 29) | SE_BROADCAST_WRITES(1 << 31);
2958 else if (se_num == 0xffffffff)
2959 data |= SE_BROADCAST_WRITES(1 << 31) | SH_INDEX(sh_num)((sh_num) << 8);
2960 else if (sh_num == 0xffffffff)
2961 data |= SH_BROADCAST_WRITES(1 << 29) | SE_INDEX(se_num)((se_num) << 16);
2962 else
2963 data |= SH_INDEX(sh_num)((sh_num) << 8) | SE_INDEX(se_num)((se_num) << 16);
2964 WREG32(GRBM_GFX_INDEX, data)r100_mm_wreg(rdev, (0x802C), (data), 0);
2965}
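si_select_se_sh() packs the shader-engine/shader-array selector into GRBM_GFX_INDEX: bit 30 broadcasts across instances, bits 29 and 31 broadcast across SHs and SEs, and the SH/SE indices land at bit 8 and bit 16. A minimal standalone sketch of that packing (plain C, outside the driver; the field positions are taken from the expansions above):

/* Standalone illustration of the GRBM_GFX_INDEX packing used above. */
#include <stdint.h>
#include <stdio.h>

#define INSTANCE_BROADCAST (1u << 30)
#define SH_BROADCAST       (1u << 29)
#define SE_BROADCAST       (1u << 31)

static uint32_t grbm_gfx_index(uint32_t se_num, uint32_t sh_num)
{
	uint32_t data = INSTANCE_BROADCAST;

	if (se_num == 0xffffffff && sh_num == 0xffffffff)
		data |= SH_BROADCAST | SE_BROADCAST;
	else if (se_num == 0xffffffff)
		data |= SE_BROADCAST | (sh_num << 8);
	else if (sh_num == 0xffffffff)
		data |= SH_BROADCAST | (se_num << 16);
	else
		data |= (sh_num << 8) | (se_num << 16);
	return data;
}

int main(void)
{
	/* select SE 1, SH 0: instance broadcast + SE index 1 + SH index 0 */
	printf("0x%08x\n", grbm_gfx_index(1, 0));                   /* 0x40010000 */
	/* broadcast everywhere, as done at the end of si_setup_spi() */
	printf("0x%08x\n", grbm_gfx_index(0xffffffff, 0xffffffff)); /* 0xe0000000 */
	return 0;
}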
2966
2967static u32 si_create_bitmask(u32 bit_width)
2968{
2969 u32 i, mask = 0;
2970
2971 for (i = 0; i < bit_width; i++) {
2972 mask <<= 1;
2973 mask |= 1;
2974 }
2975 return mask;
2976}
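si_create_bitmask() builds a run of bit_width low-order 1 bits by shifting and OR-ing; for widths below 32 this is the same value as (1u << bit_width) - 1. A small self-contained check of that equivalence (not driver code):

#include <assert.h>
#include <stdio.h>

static unsigned int create_bitmask(unsigned int bit_width)
{
	unsigned int i, mask = 0;

	for (i = 0; i < bit_width; i++) {
		mask <<= 1;
		mask |= 1;
	}
	return mask;
}

int main(void)
{
	unsigned int w;

	for (w = 0; w < 32; w++)
		assert(create_bitmask(w) == (1u << w) - 1u);
	/* e.g. cu_per_sh = 5 as used for Verde-class parts -> 0x1f */
	printf("create_bitmask(5) = 0x%x\n", create_bitmask(5));
	return 0;
}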
2977
2978static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
2979{
2980 u32 data, mask;
2981
2982 data = RREG32(CC_GC_SHADER_ARRAY_CONFIG)r100_mm_rreg(rdev, (0x89bc), 0);
2983 if (data & 1)
2984 data &= INACTIVE_CUS_MASK0xFFFF0000;
2985 else
2986 data = 0;
2987 data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG)r100_mm_rreg(rdev, (0x89c0), 0);
2988
2989 data >>= INACTIVE_CUS_SHIFT16;
2990
2991 mask = si_create_bitmask(cu_per_sh);
2992
2993 return ~data & mask;
2994}
2995
2996static void si_setup_spi(struct radeon_device *rdev,
2997 u32 se_num, u32 sh_per_se,
2998 u32 cu_per_sh)
2999{
3000 int i, j, k;
3001 u32 data, mask, active_cu;
3002
3003 for (i = 0; i < se_num; i++) {
3004 for (j = 0; j < sh_per_se; j++) {
3005 si_select_se_sh(rdev, i, j);
3006 data = RREG32(SPI_STATIC_THREAD_MGMT_3)r100_mm_rreg(rdev, (0x90E8), 0);
3007 active_cu = si_get_cu_enabled(rdev, cu_per_sh);
3008
3009 mask = 1;
3010 for (k = 0; k < 16; k++) {
3011 mask <<= k;
3012 if (active_cu & mask) {
3013 data &= ~mask;
3014 WREG32(SPI_STATIC_THREAD_MGMT_3, data)r100_mm_wreg(rdev, (0x90E8), (data), 0);
3015 break;
3016 }
3017 }
3018 }
3019 }
3020 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3021}
3022
3023static u32 si_get_rb_disabled(struct radeon_device *rdev,
3024 u32 max_rb_num_per_se,
3025 u32 sh_per_se)
3026{
3027 u32 data, mask;
3028
3029 data = RREG32(CC_RB_BACKEND_DISABLE)r100_mm_rreg(rdev, (0x98F4), 0);
3030 if (data & 1)
3031 data &= BACKEND_DISABLE_MASK0x00FF0000;
3032 else
3033 data = 0;
3034 data |= RREG32(GC_USER_RB_BACKEND_DISABLE)r100_mm_rreg(rdev, (0x9B7C), 0);
3035
3036 data >>= BACKEND_DISABLE_SHIFT16;
3037
3038 mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
3039
3040 return data & mask;
3041}
3042
3043static void si_setup_rb(struct radeon_device *rdev,
3044 u32 se_num, u32 sh_per_se,
3045 u32 max_rb_num_per_se)
3046{
3047 int i, j;
3048 u32 data, mask;
3049 u32 disabled_rbs = 0;
3050 u32 enabled_rbs = 0;
3051
3052 for (i = 0; i < se_num; i++) {
3053 for (j = 0; j < sh_per_se; j++) {
3054 si_select_se_sh(rdev, i, j);
3055 data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3056 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH2);
3057 }
3058 }
3059 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3060
3061 mask = 1;
3062 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3063 if (!(disabled_rbs & mask))
3064 enabled_rbs |= mask;
3065 mask <<= 1;
3066 }
3067
3068 rdev->config.si.backend_enable_mask = enabled_rbs;
3069
3070 for (i = 0; i < se_num; i++) {
3071 si_select_se_sh(rdev, i, 0xffffffff);
3072 data = 0;
3073 for (j = 0; j < sh_per_se; j++) {
3074 switch (enabled_rbs & 3) {
3075 case 1:
3076 data |= (RASTER_CONFIG_RB_MAP_00 << (i * sh_per_se + j) * 2);
3077 break;
3078 case 2:
3079 data |= (RASTER_CONFIG_RB_MAP_33 << (i * sh_per_se + j) * 2);
3080 break;
3081 case 3:
3082 default:
3083 data |= (RASTER_CONFIG_RB_MAP_22 << (i * sh_per_se + j) * 2);
3084 break;
3085 }
3086 enabled_rbs >>= 2;
3087 }
3088 WREG32(PA_SC_RASTER_CONFIG, data)r100_mm_wreg(rdev, (0x28350), (data), 0);
3089 }
3090 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3091}
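si_setup_rb() first gathers the per-SE/SH disabled-RB bits (two bits per SH, per TAHITI_RB_BITMAP_WIDTH_PER_SH above), then inverts them into the enabled-RB bitmap stored in backend_enable_mask and consumed two bits at a time to pick a RASTER_CONFIG RB_MAP value. A hedged sketch of the inversion step only, with invented register values:

/* Sketch: derive the enabled-RB bitmap from per-SE/SH disable bits.
 * The layout (2 bits per SH, sh_per_se SHs per SE) mirrors the loops above;
 * the sample numbers are made up for illustration. */
#include <stdint.h>
#include <stdio.h>

#define RB_BITMAP_WIDTH_PER_SH 2

int main(void)
{
	uint32_t se_num = 2, sh_per_se = 2, max_rb_num_per_se = 4;
	/* pretend SE0/SH1 reports its second RB disabled */
	uint32_t per_sh_disabled[2][2] = { { 0x0, 0x2 }, { 0x0, 0x0 } };
	uint32_t disabled_rbs = 0, enabled_rbs = 0, mask = 1;
	uint32_t i, j;

	for (i = 0; i < se_num; i++)
		for (j = 0; j < sh_per_se; j++)
			disabled_rbs |= per_sh_disabled[i][j] <<
			    ((i * sh_per_se + j) * RB_BITMAP_WIDTH_PER_SH);

	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}
	printf("disabled 0x%02x -> enabled 0x%02x\n", disabled_rbs, enabled_rbs);
	return 0;
}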
3092
3093static void si_gpu_init(struct radeon_device *rdev)
3094{
3095 u32 gb_addr_config = 0;
3096 u32 mc_shared_chmap, mc_arb_ramcfg;
3097 u32 sx_debug_1;
3098 u32 hdp_host_path_cntl;
3099 u32 tmp;
3100 int i, j;
3101
3102 switch (rdev->family) {
3103 case CHIP_TAHITI:
3104 rdev->config.si.max_shader_engines = 2;
3105 rdev->config.si.max_tile_pipes = 12;
3106 rdev->config.si.max_cu_per_sh = 8;
3107 rdev->config.si.max_sh_per_se = 2;
3108 rdev->config.si.max_backends_per_se = 4;
3109 rdev->config.si.max_texture_channel_caches = 12;
3110 rdev->config.si.max_gprs = 256;
3111 rdev->config.si.max_gs_threads = 32;
3112 rdev->config.si.max_hw_contexts = 8;
3113
3114 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3115 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
3116 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3117 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3118 gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN0x12011003;
3119 break;
3120 case CHIP_PITCAIRN:
3121 rdev->config.si.max_shader_engines = 2;
3122 rdev->config.si.max_tile_pipes = 8;
3123 rdev->config.si.max_cu_per_sh = 5;
3124 rdev->config.si.max_sh_per_se = 2;
3125 rdev->config.si.max_backends_per_se = 4;
3126 rdev->config.si.max_texture_channel_caches = 8;
3127 rdev->config.si.max_gprs = 256;
3128 rdev->config.si.max_gs_threads = 32;
3129 rdev->config.si.max_hw_contexts = 8;
3130
3131 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3132 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
3133 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3134 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3135 gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN0x12011003;
3136 break;
3137 case CHIP_VERDE:
3138 default:
3139 rdev->config.si.max_shader_engines = 1;
3140 rdev->config.si.max_tile_pipes = 4;
3141 rdev->config.si.max_cu_per_sh = 5;
3142 rdev->config.si.max_sh_per_se = 2;
3143 rdev->config.si.max_backends_per_se = 4;
3144 rdev->config.si.max_texture_channel_caches = 4;
3145 rdev->config.si.max_gprs = 256;
3146 rdev->config.si.max_gs_threads = 32;
3147 rdev->config.si.max_hw_contexts = 8;
3148
3149 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3150 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3151 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3152 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3153 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN0x12010002;
3154 break;
3155 case CHIP_OLAND:
3156 rdev->config.si.max_shader_engines = 1;
3157 rdev->config.si.max_tile_pipes = 4;
3158 rdev->config.si.max_cu_per_sh = 6;
3159 rdev->config.si.max_sh_per_se = 1;
3160 rdev->config.si.max_backends_per_se = 2;
3161 rdev->config.si.max_texture_channel_caches = 4;
3162 rdev->config.si.max_gprs = 256;
3163 rdev->config.si.max_gs_threads = 16;
3164 rdev->config.si.max_hw_contexts = 8;
3165
3166 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3167 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3168 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3169 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3170 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN0x12010002;
3171 break;
3172 case CHIP_HAINAN:
3173 rdev->config.si.max_shader_engines = 1;
3174 rdev->config.si.max_tile_pipes = 4;
3175 rdev->config.si.max_cu_per_sh = 5;
3176 rdev->config.si.max_sh_per_se = 1;
3177 rdev->config.si.max_backends_per_se = 1;
3178 rdev->config.si.max_texture_channel_caches = 2;
3179 rdev->config.si.max_gprs = 256;
3180 rdev->config.si.max_gs_threads = 16;
3181 rdev->config.si.max_hw_contexts = 8;
3182
3183 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3184 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3185 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3186 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3187 gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN0x02010001;
3188 break;
3189 }
3190
3191 /* Initialize HDP */
3192 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
3193 WREG32((0x2c14 + j), 0x00000000)r100_mm_wreg(rdev, ((0x2c14 + j)), (0x00000000), 0);
3194 WREG32((0x2c18 + j), 0x00000000)r100_mm_wreg(rdev, ((0x2c18 + j)), (0x00000000), 0);
3195 WREG32((0x2c1c + j), 0x00000000)r100_mm_wreg(rdev, ((0x2c1c + j)), (0x00000000), 0);
3196 WREG32((0x2c20 + j), 0x00000000)r100_mm_wreg(rdev, ((0x2c20 + j)), (0x00000000), 0);
3197 WREG32((0x2c24 + j), 0x00000000)r100_mm_wreg(rdev, ((0x2c24 + j)), (0x00000000), 0);
3198 }
3199
3200 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff))r100_mm_wreg(rdev, (0x8000), (((0xff) << 0)), 0);
3201 WREG32(SRBM_INT_CNTL, 1)r100_mm_wreg(rdev, (0xEA0), (1), 0);
3202 WREG32(SRBM_INT_ACK, 1)r100_mm_wreg(rdev, (0xEA8), (1), 0);
3203
3204 evergreen_fix_pci_max_read_req_size(rdev);
3205
3206 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN)r100_mm_wreg(rdev, (0x5490), ((1 << 0) | (1 << 1)), 0);
3207
3208 mc_shared_chmap = RREG32(MC_SHARED_CHMAP)r100_mm_rreg(rdev, (0x2004), 0);
3209 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG)r100_mm_rreg(rdev, (0x2760), 0);
3210
3211 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
3212 rdev->config.si.mem_max_burst_length_bytes = 256;
3213 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK0x000000C0) >> NOOFCOLS_SHIFT6;
3214 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
3215 if (rdev->config.si.mem_row_size_in_kb > 4)
3216 rdev->config.si.mem_row_size_in_kb = 4;
3217 /* XXX use MC settings? */
3218 rdev->config.si.shader_engine_tile_size = 32;
3219 rdev->config.si.num_gpus = 1;
3220 rdev->config.si.multi_gpu_tile_size = 64;
3221
3222 /* fix up row size */
3223 gb_addr_config &= ~ROW_SIZE_MASK0x30000000;
3224 switch (rdev->config.si.mem_row_size_in_kb) {
3225 case 1:
3226 default:
3227 gb_addr_config |= ROW_SIZE(0)((0) << 28);
3228 break;
3229 case 2:
3230 gb_addr_config |= ROW_SIZE(1)((1) << 28);
3231 break;
3232 case 4:
3233 gb_addr_config |= ROW_SIZE(2)((2) << 28);
3234 break;
3235 }
3236
3237 /* setup tiling info dword. gb_addr_config is not adequate since it does
3238 * not have bank info, so create a custom tiling dword.
3239 * bits 3:0 num_pipes
3240 * bits 7:4 num_banks
3241 * bits 11:8 group_size
3242 * bits 15:12 row_size
3243 */
3244 rdev->config.si.tile_config = 0;
3245 switch (rdev->config.si.num_tile_pipes) {
3246 case 1:
3247 rdev->config.si.tile_config |= (0 << 0);
3248 break;
3249 case 2:
3250 rdev->config.si.tile_config |= (1 << 0);
3251 break;
3252 case 4:
3253 rdev->config.si.tile_config |= (2 << 0);
3254 break;
3255 case 8:
3256 default:
3257 /* XXX what about 12? */
3258 rdev->config.si.tile_config |= (3 << 0);
3259 break;
3260 }
3261 switch ((mc_arb_ramcfg & NOOFBANK_MASK0x00000003) >> NOOFBANK_SHIFT0) {
3262 case 0: /* four banks */
3263 rdev->config.si.tile_config |= 0 << 4;
3264 break;
3265 case 1: /* eight banks */
3266 rdev->config.si.tile_config |= 1 << 4;
3267 break;
3268 case 2: /* sixteen banks */
3269 default:
3270 rdev->config.si.tile_config |= 2 << 4;
3271 break;
3272 }
3273 rdev->config.si.tile_config |=
3274 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK0x00000070) >> PIPE_INTERLEAVE_SIZE_SHIFT4) << 8;
3275 rdev->config.si.tile_config |=
3276 ((gb_addr_config & ROW_SIZE_MASK0x30000000) >> ROW_SIZE_SHIFT28) << 12;
3277
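The comment above line 3244 spells out the custom tiling dword: a pipe-count code in bits 3:0, a bank-count code in bits 7:4, the pipe interleave (group size) code in bits 11:8 and the row size code in bits 15:12. A worked example of that packing for an illustrative Tahiti-like configuration (12 pipes gives code 3, sixteen banks gives code 2, the remaining fields are pulled from a sample gb_addr_config):

/* Worked example of the tile_config packing above (values are illustrative). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gb_addr_config = 0x12011003;	/* TAHITI_GB_ADDR_CONFIG_GOLDEN */
	uint32_t tile_config = 0;

	tile_config |= 3 << 0;			/* 8+ tile pipes (12 on Tahiti) */
	tile_config |= 2 << 4;			/* sixteen banks */
	tile_config |= ((gb_addr_config & 0x00000070) >> 4) << 8;   /* pipe interleave */
	tile_config |= ((gb_addr_config & 0x30000000) >> 28) << 12; /* row size */

	printf("tile_config = 0x%04x\n", tile_config);	/* 0x1023 for these inputs */
	return 0;
}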
3278 WREG32(GB_ADDR_CONFIG, gb_addr_config)r100_mm_wreg(rdev, (0x98F8), (gb_addr_config), 0);
3279 WREG32(DMIF_ADDR_CONFIG, gb_addr_config)r100_mm_wreg(rdev, (0xBD4), (gb_addr_config), 0);
3280 WREG32(DMIF_ADDR_CALC, gb_addr_config)r100_mm_wreg(rdev, (0xC00), (gb_addr_config), 0);
3281 WREG32(HDP_ADDR_CONFIG, gb_addr_config)r100_mm_wreg(rdev, (0x2F48), (gb_addr_config), 0);
3282 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config)r100_mm_wreg(rdev, (0xd0b8 + 0x0), (gb_addr_config), 0);
3283 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config)r100_mm_wreg(rdev, (0xd0b8 + 0x800), (gb_addr_config), 0);
3284 if (rdev->has_uvd) {
3285 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config)r100_mm_wreg(rdev, (0xEF4C), (gb_addr_config), 0);
3286 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config)r100_mm_wreg(rdev, (0xEF50), (gb_addr_config), 0);
3287 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config)r100_mm_wreg(rdev, (0xEF54), (gb_addr_config), 0);
3288 }
3289
3290 si_tiling_mode_table_init(rdev);
3291
3292 si_setup_rb(rdev, rdev->config.si.max_shader_engines,
3293 rdev->config.si.max_sh_per_se,
3294 rdev->config.si.max_backends_per_se);
3295
3296 si_setup_spi(rdev, rdev->config.si.max_shader_engines,
3297 rdev->config.si.max_sh_per_se,
3298 rdev->config.si.max_cu_per_sh);
3299
3300 rdev->config.si.active_cus = 0;
3301 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
3302 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
3303 rdev->config.si.active_cus +=
3304 hweight32(si_get_cu_active_bitmap(rdev, i, j));
3305 }
3306 }
3307
3308 /* set HW defaults for 3D engine */
3309 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |r100_mm_wreg(rdev, (0x8760), ((((0x16) << 0) | ((0x2b) << 8))), 0)
3310 ROQ_IB2_START(0x2b)))r100_mm_wreg(rdev, (0x8760), ((((0x16) << 0) | ((0x2b) << 8))), 0);
3311 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60))r100_mm_wreg(rdev, (0x8764), (((0x30) << 0) | ((0x60) << 8)), 0);
3312
3313 sx_debug_1 = RREG32(SX_DEBUG_1)r100_mm_rreg(rdev, (0x9060), 0);
3314 WREG32(SX_DEBUG_1, sx_debug_1)r100_mm_wreg(rdev, (0x9060), (sx_debug_1), 0);
3315
3316 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4))r100_mm_wreg(rdev, (0x913C), (((4) << 0)), 0);
3317
3318 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |r100_mm_wreg(rdev, (0x8BCC), ((((rdev->config.si.sc_prim_fifo_size_frontend) << 0) | ((rdev->config.si.sc_prim_fifo_size_backend) << 6) | ((rdev->config.si.sc_hiz_tile_fifo_size) << 15) | ((rdev->config.si.sc_earlyz_tile_fifo_size) << 23))), 0)
3319 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |r100_mm_wreg(rdev, (0x8BCC), ((((rdev->config.si.sc_prim_fifo_size_frontend) << 0) | ((rdev->config.si.sc_prim_fifo_size_backend) << 6) | ((rdev->config.si.sc_hiz_tile_fifo_size) << 15) | ((rdev->config.si.sc_earlyz_tile_fifo_size) << 23))), 0)
3320 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |r100_mm_wreg(rdev, (0x8BCC), ((((rdev->config.si.sc_prim_fifo_size_frontend) << 0) | ((rdev->config.si.sc_prim_fifo_size_backend) << 6) | ((rdev->config.si.sc_hiz_tile_fifo_size) << 15) | ((rdev->config.si.sc_earlyz_tile_fifo_size) << 23))), 0)
3321 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)))r100_mm_wreg(rdev, (0x8BCC), ((((rdev->config.si.sc_prim_fifo_size_frontend) << 0) | ((rdev->config.si.sc_prim_fifo_size_backend) << 6) | ((rdev->config.si.sc_hiz_tile_fifo_size) << 15) | ((rdev->config.si.sc_earlyz_tile_fifo_size) << 23))), 0);
3322
3323 WREG32(VGT_NUM_INSTANCES, 1)r100_mm_wreg(rdev, (0x8974), (1), 0);
3324
3325 WREG32(CP_PERFMON_CNTL, 0)r100_mm_wreg(rdev, (0x87FC), (0), 0);
3326
3327 WREG32(SQ_CONFIG, 0)r100_mm_wreg(rdev, (0x8C00), (0), 0);
3328
3329 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |r100_mm_wreg(rdev, (0x8B24), ((((4095) << 0) | ((255) << 16))), 0)
3330 FORCE_EOV_MAX_REZ_CNT(255)))r100_mm_wreg(rdev, (0x8B24), ((((4095) << 0) | ((255) << 16))), 0);
3331
3332 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |r100_mm_wreg(rdev, (0x88C4), (((2) << 0) | ((3) << 6)), 0)
3333 AUTO_INVLD_EN(ES_AND_GS_AUTO))r100_mm_wreg(rdev, (0x88C4), (((2) << 0) | ((3) << 6)), 0);
3334
3335 WREG32(VGT_GS_VERTEX_REUSE, 16)r100_mm_wreg(rdev, (0x88D4), (16), 0);
3336 WREG32(PA_SC_LINE_STIPPLE_STATE, 0)r100_mm_wreg(rdev, (0x8B10), (0), 0);
3337
3338 WREG32(CB_PERFCOUNTER0_SELECT0, 0)r100_mm_wreg(rdev, (0x9a20), (0), 0);
3339 WREG32(CB_PERFCOUNTER0_SELECT1, 0)r100_mm_wreg(rdev, (0x9a24), (0), 0);
3340 WREG32(CB_PERFCOUNTER1_SELECT0, 0)r100_mm_wreg(rdev, (0x9a28), (0), 0);
3341 WREG32(CB_PERFCOUNTER1_SELECT1, 0)r100_mm_wreg(rdev, (0x9a2c), (0), 0);
3342 WREG32(CB_PERFCOUNTER2_SELECT0, 0)r100_mm_wreg(rdev, (0x9a30), (0), 0);
3343 WREG32(CB_PERFCOUNTER2_SELECT1, 0)r100_mm_wreg(rdev, (0x9a34), (0), 0);
3344 WREG32(CB_PERFCOUNTER3_SELECT0, 0)r100_mm_wreg(rdev, (0x9a38), (0), 0);
3345 WREG32(CB_PERFCOUNTER3_SELECT1, 0)r100_mm_wreg(rdev, (0x9a3c), (0), 0);
3346
3347 tmp = RREG32(HDP_MISC_CNTL)r100_mm_rreg(rdev, (0x2F4C), 0);
3348 tmp |= HDP_FLUSH_INVALIDATE_CACHE(1 << 0);
3349 WREG32(HDP_MISC_CNTL, tmp)r100_mm_wreg(rdev, (0x2F4C), (tmp), 0);
3350
3351 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL)r100_mm_rreg(rdev, (0x2C00), 0);
3352 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl)r100_mm_wreg(rdev, (0x2C00), (hdp_host_path_cntl), 0);
3353
3354 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3))r100_mm_wreg(rdev, (0x8A14), ((1 << 0) | ((3) << 1)), 0);
3355
3356 udelay(50);
3357}
3358
3359/*
3360 * GPU scratch registers helpers function.
3361 */
3362static void si_scratch_init(struct radeon_device *rdev)
3363{
3364 int i;
3365
3366 rdev->scratch.num_reg = 7;
3367 rdev->scratch.reg_base = SCRATCH_REG00x8500;
3368 for (i = 0; i < rdev->scratch.num_reg; i++) {
3369 rdev->scratch.free[i] = true1;
3370 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3371 }
3372}
3373
3374void si_fence_ring_emit(struct radeon_device *rdev,
3375 struct radeon_fence *fence)
3376{
3377 struct radeon_ring *ring = &rdev->ring[fence->ring];
3378 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3379
3380 /* flush read cache over gart */
3381 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)((3 << 30) | (((0x68) & 0xFF) << 8) | ((1) & 0x3FFF) << 16));
3382 radeon_ring_write(ring, (CP_COHER_CNTL20x85E8 - PACKET3_SET_CONFIG_REG_START0x00008000) >> 2);
3383 radeon_ring_write(ring, 0);
3384 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)((3 << 30) | (((0x43) & 0xFF) << 8) | ((3) & 0x3FFF) << 16));
3385 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA(1 << 22) |
3386 PACKET3_TC_ACTION_ENA(1 << 23) |
3387 PACKET3_SH_KCACHE_ACTION_ENA(1 << 27) |
3388 PACKET3_SH_ICACHE_ACTION_ENA(1 << 29));
3389 radeon_ring_write(ring, 0xFFFFFFFF);
3390 radeon_ring_write(ring, 0);
3391 radeon_ring_write(ring, 10); /* poll interval */
3392 /* EVENT_WRITE_EOP - flush caches, send int */
3393 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)((3 << 30) | (((0x47) & 0xFF) << 8) | ((4) & 0x3FFF) << 16));
3394 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT)(((20 << 0)) << 0) | EVENT_INDEX(5)((5) << 8));
3395 radeon_ring_write(ring, lower_32_bits(addr)((u32)(addr)));
3396 radeon_ring_write(ring, (upper_32_bits(addr)((u32)(((addr) >> 16) >> 16)) & 0xff) | DATA_SEL(1)((1) << 29) | INT_SEL(2)((2) << 24));
3397 radeon_ring_write(ring, fence->seq);
3398 radeon_ring_write(ring, 0);
3399}
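The EVENT_WRITE_EOP packet above carries the 64-bit fence address split across two ring words: the low 32 bits go out as-is, and only the low 8 bits of the upper half are kept before being combined with the DATA_SEL(1) and INT_SEL(2) fields. A standalone sketch of that split (plain C; the address value is made up):

/* Sketch of the fence-address split used in the EVENT_WRITE_EOP packet above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x0000000123456000ULL;		/* example GPU address */
	uint32_t lo = (uint32_t)addr;			/* lower_32_bits(addr) */
	uint32_t hi = ((uint32_t)(addr >> 32)) & 0xff;	/* upper_32_bits(addr) & 0xff */
	uint32_t data_sel = 1u << 29;			/* DATA_SEL(1) as in the code above */
	uint32_t int_sel = 2u << 24;			/* INT_SEL(2) as in the code above */

	printf("word0 0x%08x  word1 0x%08x\n", lo, hi | data_sel | int_sel);
	return 0;
}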
3400
3401/*
3402 * IB stuff
3403 */
3404void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3405{
3406 struct radeon_ring *ring = &rdev->ring[ib->ring];
3407 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
3408 u32 header;
3409
3410 if (ib->is_const_ib) {
3411 /* set switch buffer packet before const IB */
3412 radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)((3 << 30) | (((0x8B) & 0xFF) << 8) | ((0) & 0x3FFF) << 16));
3413 radeon_ring_write(ring, 0);
3414
3415 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2)((3 << 30) | (((0x31) & 0xFF) << 8) | ((2) & 0x3FFF) << 16);
3416 } else {
3417 u32 next_rptr;
3418 if (ring->rptr_save_reg) {
3419 next_rptr = ring->wptr + 3 + 4 + 8;
3420 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)((3 << 30) | (((0x68) & 0xFF) << 8) | ((1) & 0x3FFF) << 16));
3421 radeon_ring_write(ring, ((ring->rptr_save_reg -
3422 PACKET3_SET_CONFIG_REG_START0x00008000) >> 2));
3423 radeon_ring_write(ring, next_rptr);
3424 } else if (rdev->wb.enabled) {
3425 next_rptr = ring->wptr + 5 + 4 + 8;
3426 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)((3 << 30) | (((0x37) & 0xFF) << 8) | ((3) & 0x3FFF) << 16));
3427 radeon_ring_write(ring, (1 << 8));
3428 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3429 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)((u32)(((ring->next_rptr_gpu_addr) >> 16) >> 16)));
3430 radeon_ring_write(ring, next_rptr);
3431 }
3432
3433 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2)((3 << 30) | (((0x32) & 0xFF) << 8) | ((2) & 0x3FFF) << 16);
3434 }
3435
3436 radeon_ring_write(ring, header);
3437 radeon_ring_write(ring,
3438#ifdef __BIG_ENDIAN
3439 (2 << 0) |
3440#endif
3441 (ib->gpu_addr & 0xFFFFFFFC));
3442 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)((u32)(((ib->gpu_addr) >> 16) >> 16)) & 0xFFFF);
3443 radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
3444
3445 if (!ib->is_const_ib) {
3446 /* flush read cache over gart for this vmid */
3447 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)((3 << 30) | (((0x68) & 0xFF) << 8) | ((1) & 0x3FFF) << 16));
3448 radeon_ring_write(ring, (CP_COHER_CNTL20x85E8 - PACKET3_SET_CONFIG_REG_START0x00008000) >> 2);
3449 radeon_ring_write(ring, vm_id);
3450 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)((3 << 30) | (((0x43) & 0xFF) << 8) | ((3) & 0x3FFF) << 16));
3451 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA(1 << 22) |
3452 PACKET3_TC_ACTION_ENA(1 << 23) |
3453 PACKET3_SH_KCACHE_ACTION_ENA(1 << 27) |
3454 PACKET3_SH_ICACHE_ACTION_ENA(1 << 29));
3455 radeon_ring_write(ring, 0xFFFFFFFF);
3456 radeon_ring_write(ring, 0);
3457 radeon_ring_write(ring, 10); /* poll interval */
3458 }
3459}
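Every ring write above that begins a command goes through PACKET3(); the expanded form visible throughout this listing is (3 << 30) | ((opcode & 0xFF) << 8) | ((count & 0x3FFF) << 16). A minimal encoder reproducing those constants, using opcode values copied from the expansions (0x68 for SET_CONFIG_REG, 0x32 for INDIRECT_BUFFER):

/* Standalone PACKET3 header encoder matching the expansions in this listing. */
#include <stdint.h>
#include <stdio.h>

static uint32_t packet3(uint32_t opcode, uint32_t count)
{
	return (3u << 30) | ((opcode & 0xFF) << 8) | ((count & 0x3FFF) << 16);
}

int main(void)
{
	/* PACKET3(PACKET3_SET_CONFIG_REG, 1) from the fence/IB paths above */
	printf("SET_CONFIG_REG:  0x%08x\n", packet3(0x68, 1));	/* 0xc0016800 */
	/* PACKET3(PACKET3_INDIRECT_BUFFER, 2) used for the IB header */
	printf("INDIRECT_BUFFER: 0x%08x\n", packet3(0x32, 2));	/* 0xc0023200 */
	return 0;
}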
3460
3461/*
3462 * CP.
3463 */
3464static void si_cp_enable(struct radeon_device *rdev, bool_Bool enable)
3465{
3466 if (enable)
3467 WREG32(CP_ME_CNTL, 0)r100_mm_wreg(rdev, (0x86D8), (0), 0);
3468 else {
3469 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX0)
3470 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
3471 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT))r100_mm_wreg(rdev, (0x86D8), (((1 << 28) | (1 << 26) | (1 << 24))), 0);
3472 WREG32(SCRATCH_UMSK, 0)r100_mm_wreg(rdev, (0x8540), (0), 0);
3473 rdev->ring[RADEON_RING_TYPE_GFX_INDEX0].ready = false0;
3474 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX1].ready = false0;
3475 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX2].ready = false0;
3476 }
3477 udelay(50);
3478}
3479
3480static int si_cp_load_microcode(struct radeon_device *rdev)
3481{
3482 int i;
3483
3484 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
3485 return -EINVAL22;
3486
3487 si_cp_enable(rdev, false0);
3488
3489 if (rdev->new_fw) {
3490 const struct gfx_firmware_header_v1_0 *pfp_hdr =
3491 (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
3492 const struct gfx_firmware_header_v1_0 *ce_hdr =
3493 (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
3494 const struct gfx_firmware_header_v1_0 *me_hdr =
3495 (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
3496 const __le32 *fw_data;
3497 u32 fw_size;
3498
3499 radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
3500 radeon_ucode_print_gfx_hdr(&ce_hdr->header);
3501 radeon_ucode_print_gfx_hdr(&me_hdr->header);
3502
3503 /* PFP */
3504 fw_data = (const __le32 *)
3505 (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)((__uint32_t)(pfp_hdr->header.ucode_array_offset_bytes)));
3506 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes)((__uint32_t)(pfp_hdr->header.ucode_size_bytes)) / 4;
3507 WREG32(CP_PFP_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC150), (0), 0);
3508 for (i = 0; i < fw_size; i++)
3509 WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++))r100_mm_wreg(rdev, (0xC154), (((__uint32_t)(*(__uint32_t *)(fw_data++)))), 0);
3510 WREG32(CP_PFP_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC150), (0), 0);
3511
3512 /* CE */
3513 fw_data = (const __le32 *)
3514 (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)((__uint32_t)(ce_hdr->header.ucode_array_offset_bytes)));
3515 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes)((__uint32_t)(ce_hdr->header.ucode_size_bytes)) / 4;
3516 WREG32(CP_CE_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC168), (0), 0);
3517 for (i = 0; i < fw_size; i++)
3518 WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++))r100_mm_wreg(rdev, (0xC16C), (((__uint32_t)(*(__uint32_t *)(fw_data++)))), 0);
3519 WREG32(CP_CE_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC168), (0), 0);
3520
3521 /* ME */
3522 fw_data = (const __le32 *)
3523 (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)((__uint32_t)(me_hdr->header.ucode_array_offset_bytes)));
3524 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes)((__uint32_t)(me_hdr->header.ucode_size_bytes)) / 4;
3525 WREG32(CP_ME_RAM_WADDR, 0)r100_mm_wreg(rdev, (0xC15C), (0), 0);
3526 for (i = 0; i < fw_size; i++)
3527 WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++))r100_mm_wreg(rdev, (0xC160), (((__uint32_t)(*(__uint32_t *)(fw_data++)))), 0);
3528 WREG32(CP_ME_RAM_WADDR, 0)r100_mm_wreg(rdev, (0xC15C), (0), 0);
3529 } else {
3530 const __be32 *fw_data;
3531
3532 /* PFP */
3533 fw_data = (const __be32 *)rdev->pfp_fw->data;
3534 WREG32(CP_PFP_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC150), (0), 0);
3535 for (i = 0; i < SI_PFP_UCODE_SIZE2144; i++)
3536 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++))r100_mm_wreg(rdev, (0xC154), ((__uint32_t)(__builtin_constant_p
(*(__uint32_t *)(fw_data++)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff0000) >> 8 | ((__uint32_t)(*(
__uint32_t *)(fw_data++)) & 0xff000000) >> 24) : __swap32md
(*(__uint32_t *)(fw_data++)))), 0)
;
3537 WREG32(CP_PFP_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC150), (0), 0);
3538
3539 /* CE */
3540 fw_data = (const __be32 *)rdev->ce_fw->data;
3541 WREG32(CP_CE_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC168), (0), 0);
3542 for (i = 0; i < SI_CE_UCODE_SIZE2144; i++)
3543 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++))r100_mm_wreg(rdev, (0xC16C), ((__uint32_t)(__builtin_constant_p
(*(__uint32_t *)(fw_data++)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff0000) >> 8 | ((__uint32_t)(*(
__uint32_t *)(fw_data++)) & 0xff000000) >> 24) : __swap32md
(*(__uint32_t *)(fw_data++)))), 0)
;
3544 WREG32(CP_CE_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC168), (0), 0);
3545
3546 /* ME */
3547 fw_data = (const __be32 *)rdev->me_fw->data;
3548 WREG32(CP_ME_RAM_WADDR, 0)r100_mm_wreg(rdev, (0xC15C), (0), 0);
3549 for (i = 0; i < SI_PM4_UCODE_SIZE2144; i++)
3550 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++))r100_mm_wreg(rdev, (0xC160), ((__uint32_t)(__builtin_constant_p
(*(__uint32_t *)(fw_data++)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff0000) >> 8 | ((__uint32_t)(*(
__uint32_t *)(fw_data++)) & 0xff000000) >> 24) : __swap32md
(*(__uint32_t *)(fw_data++)))), 0)
;
3551 WREG32(CP_ME_RAM_WADDR, 0)r100_mm_wreg(rdev, (0xC15C), (0), 0);
3552 }
3553
3554 WREG32(CP_PFP_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC150), (0), 0);
3555 WREG32(CP_CE_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC168), (0), 0);
3556 WREG32(CP_ME_RAM_WADDR, 0)r100_mm_wreg(rdev, (0xC15C), (0), 0);
3557 WREG32(CP_ME_RAM_RADDR, 0)r100_mm_wreg(rdev, (0xC158), (0), 0);
3558 return 0;
3559}
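Both firmware paths above use the same indirect-register upload idiom: zero the address/write-pointer register, stream every dword of the image into the data register, then zero the address register again. A hedged, self-contained sketch of that pattern with a stub register writer (the helper names are invented; only the ordering mirrors the code above):

/* Sketch of the address/data ucode upload sequence used above.
 * reg_write() is a stand-in for the MMIO write; the register numbers and
 * image contents are placeholders, only the ordering matters here. */
#include <stdint.h>
#include <stdio.h>

static void reg_write(uint32_t reg, uint32_t val)
{
	printf("wreg 0x%04x <- 0x%08x\n", reg, val);
}

static void upload_ucode(uint32_t addr_reg, uint32_t data_reg,
    const uint32_t *image, size_t ndwords)
{
	size_t i;

	reg_write(addr_reg, 0);			/* reset the write pointer */
	for (i = 0; i < ndwords; i++)
		reg_write(data_reg, image[i]);	/* stream the image */
	reg_write(addr_reg, 0);			/* leave the pointer at the start */
}

int main(void)
{
	const uint32_t fake_fw[3] = { 0x11111111, 0x22222222, 0x33333333 };

	/* the PFP path above uses 0xC150/0xC154; here they are only echoed */
	upload_ucode(0xC150, 0xC154, fake_fw, 3);
	return 0;
}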
3560
3561static int si_cp_start(struct radeon_device *rdev)
3562{
3563 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0];
3564 int r, i;
3565
3566 r = radeon_ring_lock(rdev, ring, 7 + 4);
3567 if (r) {
3568 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r)__drm_err("radeon: cp failed to lock ring (%d).\n", r);
3569 return r;
3570 }
3571 /* init the CP */
3572 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)((3 << 30) | (((0x44) & 0xFF) << 8) | ((5) & 0x3FFF) << 16));
3573 radeon_ring_write(ring, 0x1);
3574 radeon_ring_write(ring, 0x0);
3575 radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
3576 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)((1) << 16));
3577 radeon_ring_write(ring, 0);
3578 radeon_ring_write(ring, 0);
3579
3580 /* init the CE partitions */
3581 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)((3 << 30) | (((0x11) & 0xFF) << 8) | ((2) & 0x3FFF) << 16));
3582 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)((3) << 0));
3583 radeon_ring_write(ring, 0xc000);
3584 radeon_ring_write(ring, 0xe000);
3585 radeon_ring_unlock_commit(rdev, ring, false0);
3586
3587 si_cp_enable(rdev, true1);
3588
3589 r = radeon_ring_lock(rdev, ring, si_default_size + 10);
3590 if (r) {
3591 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r)__drm_err("radeon: cp failed to lock ring (%d).\n", r);
3592 return r;
3593 }
3594
3595 /* setup clear context state */
3596 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)((3 << 30) | (((0x4A) & 0xFF) << 8) | ((0) & 0x3FFF) << 16));
3597 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE(2 << 28));
3598
3599 for (i = 0; i < si_default_size; i++)
3600 radeon_ring_write(ring, si_default_state[i]);
3601
3602 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)((3 << 30) | (((0x4A) & 0xFF) << 8) | ((0) & 0x3FFF) << 16));
3603 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE(3 << 28));
3604
3605 /* set clear context state */
3606 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)((3 << 30) | (((0x12) & 0xFF) << 8) | ((0) & 0x3FFF) << 16));
3607 radeon_ring_write(ring, 0);
3608
3609 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)((3 << 30) | (((0x69) & 0xFF) << 8) | ((2) & 0x3FFF) << 16));
3610 radeon_ring_write(ring, 0x00000316);
3611 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3612 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3613
3614 radeon_ring_unlock_commit(rdev, ring, false0);
3615
3616 for (i = RADEON_RING_TYPE_GFX_INDEX0; i <= CAYMAN_RING_TYPE_CP2_INDEX2; ++i) {
3617 ring = &rdev->ring[i];
3618 r = radeon_ring_lock(rdev, ring, 2);
3619
3620 /* clear the compute context state */
3621 radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)(((3 << 30) | (((0x12) & 0xFF) << 8) | ((0) & 0x3FFF) << 16) | 1 << 1));
3622 radeon_ring_write(ring, 0);
3623
3624 radeon_ring_unlock_commit(rdev, ring, false0);
3625 }
3626
3627 return 0;
3628}
3629
3630static void si_cp_fini(struct radeon_device *rdev)
3631{
3632 struct radeon_ring *ring;
3633 si_cp_enable(rdev, false0);
3634
3635 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0];
3636 radeon_ring_fini(rdev, ring);
3637 radeon_scratch_free(rdev, ring->rptr_save_reg);
3638
3639 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX1];
3640 radeon_ring_fini(rdev, ring);
3641 radeon_scratch_free(rdev, ring->rptr_save_reg);
3642
3643 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX2];
3644 radeon_ring_fini(rdev, ring);
3645 radeon_scratch_free(rdev, ring->rptr_save_reg);
3646}
3647
3648static int si_cp_resume(struct radeon_device *rdev)
3649{
3650 struct radeon_ring *ring;
3651 u32 tmp;
3652 u32 rb_bufsz;
3653 int r;
3654
3655 si_enable_gui_idle_interrupt(rdev, false0);
3656
3657 WREG32(CP_SEM_WAIT_TIMER, 0x0)r100_mm_wreg(rdev, (0x85BC), (0x0), 0);
3658 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0)r100_mm_wreg(rdev, (0x85C8), (0x0), 0);
3659
3660 /* Set the write pointer delay */
3661 WREG32(CP_RB_WPTR_DELAY, 0)r100_mm_wreg(rdev, (0x8704), (0), 0);
3662
3663 WREG32(CP_DEBUG, 0)r100_mm_wreg(rdev, (0xC1FC), (0), 0);
3664 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF)r100_mm_wreg(rdev, (0x8544), (((rdev->wb.gpu_addr + 0) >> 8) & 0xFFFFFFFF), 0);
3665
3666 /* ring 0 - compute and gfx */
3667 /* Set ring buffer size */
3668 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0];
3669 rb_bufsz = order_base_2(ring->ring_size / 8)drm_order(ring->ring_size / 8);
3670 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8)drm_order(4096/8) << 8) | rb_bufsz;
3671#ifdef __BIG_ENDIAN
3672 tmp |= BUF_SWAP_32BIT(2 << 16);
3673#endif
3674 WREG32(CP_RB0_CNTL, tmp)r100_mm_wreg(rdev, (0xC104), (tmp), 0);
3675
3676 /* Initialize the ring buffer's read and write pointers */
3677 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA)r100_mm_wreg(rdev, (0xC104), (tmp | (1 << 31)), 0);
3678 ring->wptr = 0;
3679 WREG32(CP_RB0_WPTR, ring->wptr)r100_mm_wreg(rdev, (0xC114), (ring->wptr), 0);
3680
3681 /* set the wb address whether it's enabled or not */
3682 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)r100_mm_wreg(rdev, (0xC10C), ((rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC), 0);
3683 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF)r100_mm_wreg(rdev, (0xC110), (((u32)(((rdev->wb.gpu_addr + 1024) >> 16) >> 16)) & 0xFF), 0);
3684
3685 if (rdev->wb.enabled)
3686 WREG32(SCRATCH_UMSK, 0xff)r100_mm_wreg(rdev, (0x8540), (0xff), 0);
3687 else {
3688 tmp |= RB_NO_UPDATE(1 << 27);
3689 WREG32(SCRATCH_UMSK, 0)r100_mm_wreg(rdev, (0x8540), (0), 0);
3690 }
3691
3692 mdelay(1);
3693 WREG32(CP_RB0_CNTL, tmp)r100_mm_wreg(rdev, (0xC104), (tmp), 0);
3694
3695 WREG32(CP_RB0_BASE, ring->gpu_addr >> 8)r100_mm_wreg(rdev, (0xC100), (ring->gpu_addr >> 8), 0);
3696
3697 /* ring1 - compute only */
3698 /* Set ring buffer size */
3699 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX1];
3700 rb_bufsz = order_base_2(ring->ring_size / 8)drm_order(ring->ring_size / 8);
3701 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8)drm_order(4096/8) << 8) | rb_bufsz;
3702#ifdef __BIG_ENDIAN
3703 tmp |= BUF_SWAP_32BIT(2 << 16);
3704#endif
3705 WREG32(CP_RB1_CNTL, tmp)r100_mm_wreg(rdev, (0xC184), (tmp), 0);
3706
3707 /* Initialize the ring buffer's read and write pointers */
3708 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA)r100_mm_wreg(rdev, (0xC184), (tmp | (1 << 31)), 0);
3709 ring->wptr = 0;
3710 WREG32(CP_RB1_WPTR, ring->wptr)r100_mm_wreg(rdev, (0xC190), (ring->wptr), 0);
3711
3712 /* set the wb address whether it's enabled or not */
3713 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC)r100_mm_wreg(rdev, (0xC188), ((rdev->wb.gpu_addr + 1280) & 0xFFFFFFFC), 0);
3714 WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF)r100_mm_wreg(rdev, (0xC18C), (((u32)(((rdev->wb.gpu_addr + 1280) >> 16) >> 16)) & 0xFF), 0);
3715
3716 mdelay(1);
3717 WREG32(CP_RB1_CNTL, tmp)r100_mm_wreg(rdev, (0xC184), (tmp), 0);
3718
3719 WREG32(CP_RB1_BASE, ring->gpu_addr >> 8)r100_mm_wreg(rdev, (0xC180), (ring->gpu_addr >> 8), 0);
3720
3721 /* ring2 - compute only */
3722 /* Set ring buffer size */
3723 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX2];
3724 rb_bufsz = order_base_2(ring->ring_size / 8)drm_order(ring->ring_size / 8);
3725 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8)drm_order(4096/8) << 8) | rb_bufsz;
3726#ifdef __BIG_ENDIAN
3727 tmp |= BUF_SWAP_32BIT(2 << 16);
3728#endif
3729 WREG32(CP_RB2_CNTL, tmp)r100_mm_wreg(rdev, (0xC198), (tmp), 0);
3730
3731 /* Initialize the ring buffer's read and write pointers */
3732 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA)r100_mm_wreg(rdev, (0xC198), (tmp | (1 << 31)), 0);
3733 ring->wptr = 0;
3734 WREG32(CP_RB2_WPTR, ring->wptr)r100_mm_wreg(rdev, (0xC1A4), (ring->wptr), 0);
3735
3736 /* set the wb address whether it's enabled or not */
3737 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC)r100_mm_wreg(rdev, (0xC19C), ((rdev->wb.gpu_addr + 1536) & 0xFFFFFFFC), 0);
3738 WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF)r100_mm_wreg(rdev, (0xC1A0), (((u32)(((rdev->wb.gpu_addr + 1536) >> 16) >> 16)) & 0xFF), 0);
3739
3740 mdelay(1);
3741 WREG32(CP_RB2_CNTL, tmp)r100_mm_wreg(rdev, (0xC198), (tmp), 0);
3742
3743 WREG32(CP_RB2_BASE, ring->gpu_addr >> 8)r100_mm_wreg(rdev, (0xC194), (ring->gpu_addr >> 8), 0);
3744
3745 /* start the rings */
3746 si_cp_start(rdev);
3747 rdev->ring[RADEON_RING_TYPE_GFX_INDEX0].ready = true1;
3748 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX1].ready = true1;
3749 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX2].ready = true1;
3750 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX])(rdev)->asic->ring[(0)]->ring_test((rdev), (&rdev->ring[0]));
3751 if (r) {
3752 rdev->ring[RADEON_RING_TYPE_GFX_INDEX0].ready = false0;
3753 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX1].ready = false0;
3754 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX2].ready = false0;
3755 return r;
3756 }
3757 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])(rdev)->asic->ring[(1)]->ring_test((rdev), (&rdev->ring[1]));
3758 if (r) {
3759 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX1].ready = false0;
3760 }
3761 r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])(rdev)->asic->ring[(2)]->ring_test((rdev), (&rdev->ring[2]));
3762 if (r) {
3763 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX2].ready = false0;
3764 }
3765
3766 si_enable_gui_idle_interrupt(rdev, true1);
3767
3768 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX0)
3769 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
3770
3771 return 0;
3772}
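Each CP_RBn_CNTL value above packs two log2 quantities: order_base_2(ring->ring_size / 8) in the low bits and order_base_2(4096 / 8) = 9 shifted left by 8. A short worked example under an assumed ring size (the helper below is a plain round-up log2, standing in for the drm_order() expansion shown in the listing):

/* Worked example of the CP_RB0_CNTL size packing above (ring size is assumed). */
#include <stdint.h>
#include <stdio.h>

static uint32_t order_base_2(uint32_t n)	/* smallest p with (1 << p) >= n */
{
	uint32_t p = 0;

	while ((1u << p) < n)
		p++;
	return p;
}

int main(void)
{
	uint32_t ring_size = 1024 * 1024;			/* hypothetical 1 MiB ring */
	uint32_t rb_bufsz = order_base_2(ring_size / 8);	/* 17 */
	uint32_t tmp = (order_base_2(4096 / 8) << 8) | rb_bufsz;/* (9 << 8) | 17 */

	printf("CP_RB0_CNTL size bits = 0x%08x\n", tmp);	/* 0x00000911 */
	return 0;
}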
3773
3774u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
3775{
3776 u32 reset_mask = 0;
3777 u32 tmp;
3778
3779 /* GRBM_STATUS */
3780 tmp = RREG32(GRBM_STATUS)r100_mm_rreg(rdev, (0x8010), 0);
3781 if (tmp & (PA_BUSY(1 << 25) | SC_BUSY(1 << 24) |
3782 BCI_BUSY(1 << 23) | SX_BUSY(1 << 20) |
3783 TA_BUSY(1 << 14) | VGT_BUSY(1 << 17) |
3784 DB_BUSY(1 << 26) | CB_BUSY(1 << 30) |
3785 GDS_BUSY(1 << 15) | SPI_BUSY(1 << 22) |
3786 IA_BUSY(1 << 19) | IA_BUSY_NO_DMA(1 << 18)))
3787 reset_mask |= RADEON_RESET_GFX(1 << 0);
3788
3789 if (tmp & (CF_RQ_PENDING(1 << 7) | PF_RQ_PENDING(1 << 8) |
3790 CP_BUSY(1 << 29) | CP_COHERENCY_BUSY(1 << 28)))
3791 reset_mask |= RADEON_RESET_CP(1 << 3);
3792
3793 if (tmp & GRBM_EE_BUSY(1 << 10))
3794 reset_mask |= RADEON_RESET_GRBM(1 << 4) | RADEON_RESET_GFX(1 << 0) | RADEON_RESET_CP(1 << 3);
3795
3796 /* GRBM_STATUS2 */
3797 tmp = RREG32(GRBM_STATUS2)r100_mm_rreg(rdev, (0x8008), 0);
3798 if (tmp & (RLC_RQ_PENDING(1 << 0) | RLC_BUSY(1 << 8)))
3799 reset_mask |= RADEON_RESET_RLC(1 << 6);
3800
3801 /* DMA_STATUS_REG 0 */
3802 tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET)r100_mm_rreg(rdev, (0xd034 + 0x0), 0);
3803 if (!(tmp & DMA_IDLE(1 << 0)))
3804 reset_mask |= RADEON_RESET_DMA(1 << 2);
3805
3806 /* DMA_STATUS_REG 1 */
3807 tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET)r100_mm_rreg(rdev, (0xd034 + 0x800), 0);
3808 if (!(tmp & DMA_IDLE(1 << 0)))
3809 reset_mask |= RADEON_RESET_DMA1(1 << 5);
3810
3811 /* SRBM_STATUS2 */
3812 tmp = RREG32(SRBM_STATUS2)r100_mm_rreg(rdev, (0x0EC4), 0);
3813 if (tmp & DMA_BUSY(1 << 5))
3814 reset_mask |= RADEON_RESET_DMA(1 << 2);
3815
3816 if (tmp & DMA1_BUSY(1 << 6))
3817 reset_mask |= RADEON_RESET_DMA1(1 << 5);
3818
3819 /* SRBM_STATUS */
3820 tmp = RREG32(SRBM_STATUS)r100_mm_rreg(rdev, (0xE50), 0);
3821
3822 if (tmp & IH_BUSY(1 << 17))
3823 reset_mask |= RADEON_RESET_IH(1 << 8);
3824
3825 if (tmp & SEM_BUSY(1 << 14))
3826 reset_mask |= RADEON_RESET_SEM(1 << 7);
3827
3828 if (tmp & GRBM_RQ_PENDING(1 << 5))
3829 reset_mask |= RADEON_RESET_GRBM(1 << 4);
3830
3831 if (tmp & VMC_BUSY(1 << 8))
3832 reset_mask |= RADEON_RESET_VMC(1 << 9);
3833
3834 if (tmp & (MCB_BUSY(1 << 9) | MCB_NON_DISPLAY_BUSY(1 << 10) |
3835 MCC_BUSY(1 << 11) | MCD_BUSY(1 << 12)))
3836 reset_mask |= RADEON_RESET_MC(1 << 10);
3837
3838 if (evergreen_is_display_hung(rdev))
3839 reset_mask |= RADEON_RESET_DISPLAY(1 << 11);
3840
3841 /* VM_L2_STATUS */
3842 tmp = RREG32(VM_L2_STATUS)r100_mm_rreg(rdev, (0x140C), 0);
3843 if (tmp & L2_BUSY(1 << 0))
3844 reset_mask |= RADEON_RESET_VMC(1 << 9);
3845
3846 /* Skip MC reset as it's most likely not hung, just busy */
3847 if (reset_mask & RADEON_RESET_MC(1 << 10)) {
3848 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask)__drm_dbg(DRM_UT_CORE, "MC busy: 0x%08X, clearing.\n", reset_mask);
3849 reset_mask &= ~RADEON_RESET_MC(1 << 10);
3850 }
3851
3852 return reset_mask;
3853}
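si_gpu_check_soft_reset() returns a bitmask in which each bit names one block to reset; the callers below test bits such as RADEON_RESET_GFX (1 << 0), RADEON_RESET_DMA (1 << 2) and RADEON_RESET_CP (1 << 3). A tiny decoder for such a mask, using only the bit values visible in this listing (the sample mask is made up):

/* Decode a reset_mask value using the bit assignments shown in the listing. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const struct { uint32_t bit; const char *name; } bits[] = {
		{ 1u << 0,  "GFX" },     { 1u << 1,  "COMPUTE" },
		{ 1u << 2,  "DMA" },     { 1u << 3,  "CP" },
		{ 1u << 4,  "GRBM" },    { 1u << 5,  "DMA1" },
		{ 1u << 6,  "RLC" },     { 1u << 7,  "SEM" },
		{ 1u << 8,  "IH" },      { 1u << 9,  "VMC" },
		{ 1u << 10, "MC" },      { 1u << 11, "DISPLAY" },
	};
	uint32_t reset_mask = (1u << 0) | (1u << 3);	/* example: GFX and CP hung */
	size_t i;

	for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
		if (reset_mask & bits[i].bit)
			printf("reset %s\n", bits[i].name);
	return 0;
}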
3854
3855static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
3856{
3857 struct evergreen_mc_save save;
3858 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
3859 u32 tmp;
3860
3861 if (reset_mask == 0)
3862 return;
3863
3864 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask)do { } while(0);
3865
3866 evergreen_print_gpu_status_regs(rdev);
3867 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",do { } while(0)
3868 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR))do { } while(0);
3869 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",do { } while(0)
3870 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS))do { } while(0);
3871
3872 /* disable PG/CG */
3873 si_fini_pg(rdev);
3874 si_fini_cg(rdev);
3875
3876 /* stop the rlc */
3877 si_rlc_stop(rdev);
3878
3879 /* Disable CP parsing/prefetching */
3880 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)r100_mm_wreg(rdev, (0x86D8), ((1 << 28) | (1 << 26) | (1 << 24)), 0);
3881
3882 if (reset_mask & RADEON_RESET_DMA(1 << 2)) {
3883 /* dma0 */
3884 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET)r100_mm_rreg(rdev, (0xd000 + 0x0), 0);
3885 tmp &= ~DMA_RB_ENABLE(1 << 0);
3886 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp)r100_mm_wreg(rdev, (0xd000 + 0x0), (tmp), 0);
3887 }
3888 if (reset_mask & RADEON_RESET_DMA1(1 << 5)) {
3889 /* dma1 */
3890 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET)r100_mm_rreg(rdev, (0xd000 + 0x800), 0);
3891 tmp &= ~DMA_RB_ENABLE(1 << 0);
3892 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp)r100_mm_wreg(rdev, (0xd000 + 0x800), (tmp), 0);
3893 }
3894
3895 udelay(50);
3896
3897 evergreen_mc_stop(rdev, &save);
3898 if (evergreen_mc_wait_for_idle(rdev)) {
3899 dev_warn(rdev->dev, "Wait for MC idle timedout !\n")printf("drm:pid%d:%s *WARNING* " "Wait for MC idle timedout !\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
3900 }
3901
3902 if (reset_mask & (RADEON_RESET_GFX(1 << 0) | RADEON_RESET_COMPUTE(1 << 1) | RADEON_RESET_CP(1 << 3))) {
3903 grbm_soft_reset = SOFT_RESET_CB(1 << 1) |
3904 SOFT_RESET_DB(1 << 3) |
3905 SOFT_RESET_GDS(1 << 4) |
3906 SOFT_RESET_PA(1 << 5) |
3907 SOFT_RESET_SC(1 << 6) |
3908 SOFT_RESET_BCI(1 << 7) |
3909 SOFT_RESET_SPI(1 << 8) |
3910 SOFT_RESET_SX(1 << 10) |
3911 SOFT_RESET_TC(1 << 11) |
3912 SOFT_RESET_TA(1 << 12) |
3913 SOFT_RESET_VGT(1 << 14) |
3914 SOFT_RESET_IA(1 << 15);
3915 }
3916
3917 if (reset_mask & RADEON_RESET_CP(1 << 3)) {
3918 grbm_soft_reset |= SOFT_RESET_CP(1 << 0) | SOFT_RESET_VGT(1 << 14);
3919
3920 srbm_soft_reset |= SOFT_RESET_GRBM(1 << 8);
3921 }
3922
3923 if (reset_mask & RADEON_RESET_DMA(1 << 2))
3924 srbm_soft_reset |= SOFT_RESET_DMA(1 << 20);
3925
3926 if (reset_mask & RADEON_RESET_DMA1(1 << 5))
3927 srbm_soft_reset |= SOFT_RESET_DMA1(1 << 6);
3928
3929 if (reset_mask & RADEON_RESET_DISPLAY(1 << 11))
3930 srbm_soft_reset |= SOFT_RESET_DC(1 << 5);
3931
3932 if (reset_mask & RADEON_RESET_RLC(1 << 6))
3933 grbm_soft_reset |= SOFT_RESET_RLC(1 << 2);
3934
3935 if (reset_mask & RADEON_RESET_SEM(1 << 7))
3936 srbm_soft_reset |= SOFT_RESET_SEM(1 << 15);
3937
3938 if (reset_mask & RADEON_RESET_IH(1 << 8))
3939 srbm_soft_reset |= SOFT_RESET_IH(1 << 10);
3940
3941 if (reset_mask & RADEON_RESET_GRBM(1 << 4))
3942 srbm_soft_reset |= SOFT_RESET_GRBM(1 << 8);
3943
3944 if (reset_mask & RADEON_RESET_VMC(1 << 9))
3945 srbm_soft_reset |= SOFT_RESET_VMC(1 << 17);
3946
3947 if (reset_mask & RADEON_RESET_MC(1 << 10))
3948 srbm_soft_reset |= SOFT_RESET_MC(1 << 11);
3949
3950 if (grbm_soft_reset) {
3951 tmp = RREG32(GRBM_SOFT_RESET)r100_mm_rreg(rdev, (0x8020), 0);
3952 tmp |= grbm_soft_reset;
3953 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp)do { } while(0);
3954 WREG32(GRBM_SOFT_RESET, tmp)r100_mm_wreg(rdev, (0x8020), (tmp), 0);
3955 tmp = RREG32(GRBM_SOFT_RESET)r100_mm_rreg(rdev, (0x8020), 0);
3956
3957 udelay(50);
3958
3959 tmp &= ~grbm_soft_reset;
3960 WREG32(GRBM_SOFT_RESET, tmp)r100_mm_wreg(rdev, (0x8020), (tmp), 0);
3961 tmp = RREG32(GRBM_SOFT_RESET)r100_mm_rreg(rdev, (0x8020), 0);
3962 }
3963
3964 if (srbm_soft_reset) {
3965 tmp = RREG32(SRBM_SOFT_RESET)r100_mm_rreg(rdev, (0x0E60), 0);
3966 tmp |= srbm_soft_reset;
3967 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp)do { } while(0);
3968 WREG32(SRBM_SOFT_RESET, tmp)r100_mm_wreg(rdev, (0x0E60), (tmp), 0);
3969 tmp = RREG32(SRBM_SOFT_RESET)r100_mm_rreg(rdev, (0x0E60), 0);
3970
3971 udelay(50);
3972
3973 tmp &= ~srbm_soft_reset;
3974 WREG32(SRBM_SOFT_RESET, tmp)r100_mm_wreg(rdev, (0x0E60), (tmp), 0);
3975 tmp = RREG32(SRBM_SOFT_RESET)r100_mm_rreg(rdev, (0x0E60), 0);
3976 }
3977
3978 /* Wait a little for things to settle down */
3979 udelay(50);
3980
3981 evergreen_mc_resume(rdev, &save);
3982 udelay(50);
3983
3984 evergreen_print_gpu_status_regs(rdev);
3985}
3986
3987static void si_set_clk_bypass_mode(struct radeon_device *rdev)
3988{
3989 u32 tmp, i;
3990
3991 tmp = RREG32(CG_SPLL_FUNC_CNTL)r100_mm_rreg(rdev, (0x600), 0);
3992 tmp |= SPLL_BYPASS_EN(1 << 3);
3993 WREG32(CG_SPLL_FUNC_CNTL, tmp)r100_mm_wreg(rdev, (0x600), (tmp), 0);
3994
3995 tmp = RREG32(CG_SPLL_FUNC_CNTL_2)r100_mm_rreg(rdev, (0x604), 0);
3996 tmp |= SPLL_CTLREQ_CHG(1 << 23);
3997 WREG32(CG_SPLL_FUNC_CNTL_2, tmp)r100_mm_wreg(rdev, (0x604), (tmp), 0);
3998
3999 for (i = 0; i < rdev->usec_timeout; i++) {
4000 if (RREG32(SPLL_STATUS)r100_mm_rreg(rdev, (0x614), 0) & SPLL_CHG_STATUS(1 << 1))
4001 break;
4002 udelay(1);
4003 }
4004
4005 tmp = RREG32(CG_SPLL_FUNC_CNTL_2)r100_mm_rreg(rdev, (0x604), 0);
4006 tmp &= ~(SPLL_CTLREQ_CHG(1 << 23) | SCLK_MUX_UPDATE(1 << 26));
4007 WREG32(CG_SPLL_FUNC_CNTL_2, tmp)r100_mm_wreg(rdev, (0x604), (tmp), 0);
4008
4009 tmp = RREG32(MPLL_CNTL_MODE)r100_mm_rreg(rdev, (0x2bb0), 0);
4010 tmp &= ~MPLL_MCLK_SEL(1 << 11);
4011 WREG32(MPLL_CNTL_MODE, tmp)r100_mm_wreg(rdev, (0x2bb0), (tmp), 0);
4012}
4013
4014static void si_spll_powerdown(struct radeon_device *rdev)
4015{
4016 u32 tmp;
4017
4018 tmp = RREG32(SPLL_CNTL_MODE)r100_mm_rreg(rdev, (0x618), 0);
4019 tmp |= SPLL_SW_DIR_CONTROL(1 << 0);
4020 WREG32(SPLL_CNTL_MODE, tmp)r100_mm_wreg(rdev, (0x618), (tmp), 0);
4021
4022 tmp = RREG32(CG_SPLL_FUNC_CNTL)r100_mm_rreg(rdev, (0x600), 0);
4023 tmp |= SPLL_RESET(1 << 0);
4024 WREG32(CG_SPLL_FUNC_CNTL, tmp)r100_mm_wreg(rdev, (0x600), (tmp), 0);
4025
4026 tmp = RREG32(CG_SPLL_FUNC_CNTL)r100_mm_rreg(rdev, (0x600), 0);
4027 tmp |= SPLL_SLEEP(1 << 1);
4028 WREG32(CG_SPLL_FUNC_CNTL, tmp)r100_mm_wreg(rdev, (0x600), (tmp), 0);
4029
4030 tmp = RREG32(SPLL_CNTL_MODE)r100_mm_rreg(rdev, (0x618), 0);
4031 tmp &= ~SPLL_SW_DIR_CONTROL(1 << 0);
4032 WREG32(SPLL_CNTL_MODE, tmp)r100_mm_wreg(rdev, (0x618), (tmp), 0);
4033}
4034
4035static void si_gpu_pci_config_reset(struct radeon_device *rdev)
4036{
4037 struct evergreen_mc_save save;
4038 u32 tmp, i;
4039
4040 dev_info(rdev->dev, "GPU pci config reset\n")do { } while(0);
4041
4042 /* disable dpm? */
4043
4044 /* disable cg/pg */
4045 si_fini_pg(rdev);
4046 si_fini_cg(rdev);
4047
4048 /* Disable CP parsing/prefetching */
4049 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)r100_mm_wreg(rdev, (0x86D8), ((1 << 28) | (1 << 26) | (1 << 24)), 0);
4050 /* dma0 */
4051 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET)r100_mm_rreg(rdev, (0xd000 + 0x0), 0);
4052 tmp &= ~DMA_RB_ENABLE(1 << 0);
4053 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp)r100_mm_wreg(rdev, (0xd000 + 0x0), (tmp), 0);
4054 /* dma1 */
4055 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET)r100_mm_rreg(rdev, (0xd000 + 0x800), 0);
4056 tmp &= ~DMA_RB_ENABLE(1 << 0);
4057 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp)r100_mm_wreg(rdev, (0xd000 + 0x800), (tmp), 0);
4058 /* XXX other engines? */
4059
4060 /* halt the rlc, disable cp internal ints */
4061 si_rlc_stop(rdev);
4062
4063 udelay(50);
4064
4065 /* disable mem access */
4066 evergreen_mc_stop(rdev, &save);
4067 if (evergreen_mc_wait_for_idle(rdev)) {
4068 dev_warn(rdev->dev, "Wait for MC idle timed out !\n")printf("drm:pid%d:%s *WARNING* " "Wait for MC idle timed out !\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4069 }
4070
4071 /* set mclk/sclk to bypass */
4072 si_set_clk_bypass_mode(rdev);
4073 /* powerdown spll */
4074 si_spll_powerdown(rdev);
4075 /* disable BM */
4076 pci_clear_master(rdev->pdev);
4077 /* reset */
4078 radeon_pci_config_reset(rdev);
4079 /* wait for asic to come out of reset */
4080 for (i = 0; i < rdev->usec_timeout; i++) {
4081 if (RREG32(CONFIG_MEMSIZE)r100_mm_rreg(rdev, (0x5428), 0) != 0xffffffff)
4082 break;
4083 udelay(1);
4084 }
4085}
4086
4087int si_asic_reset(struct radeon_device *rdev, bool_Bool hard)
4088{
4089 u32 reset_mask;
4090
4091 if (hard) {
4092 si_gpu_pci_config_reset(rdev);
4093 return 0;
4094 }
4095
4096 reset_mask = si_gpu_check_soft_reset(rdev);
4097
4098 if (reset_mask)
4099 r600_set_bios_scratch_engine_hung(rdev, true1);
4100
4101 /* try soft reset */
4102 si_gpu_soft_reset(rdev, reset_mask);
4103
4104 reset_mask = si_gpu_check_soft_reset(rdev);
4105
4106 /* try pci config reset */
4107 if (reset_mask && radeon_hard_reset)
4108 si_gpu_pci_config_reset(rdev);
4109
4110 reset_mask = si_gpu_check_soft_reset(rdev);
4111
4112 if (!reset_mask)
4113 r600_set_bios_scratch_engine_hung(rdev, false0);
4114
4115 return 0;
4116}
4117
4118/**
4119 * si_gfx_is_lockup - Check if the GFX engine is locked up
4120 *
4121 * @rdev: radeon_device pointer
4122 * @ring: radeon_ring structure holding ring information
4123 *
4124 * Check if the GFX engine is locked up.
4125 * Returns true if the engine appears to be locked up, false if not.
4126 */
4127bool_Bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4128{
4129 u32 reset_mask = si_gpu_check_soft_reset(rdev);
4130
4131 if (!(reset_mask & (RADEON_RESET_GFX(1 << 0) |
4132 RADEON_RESET_COMPUTE(1 << 1) |
4133 RADEON_RESET_CP(1 << 3)))) {
4134 radeon_ring_lockup_update(rdev, ring);
4135 return false0;
4136 }
4137 return radeon_ring_test_lockup(rdev, ring);
4138}
4139
4140/* MC */
4141static void si_mc_program(struct radeon_device *rdev)
4142{
4143 struct evergreen_mc_save save;
4144 u32 tmp;
4145 int i, j;
4146
4147 /* Initialize HDP */
4148 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
4149 WREG32((0x2c14 + j), 0x00000000)r100_mm_wreg(rdev, ((0x2c14 + j)), (0x00000000), 0);
4150 WREG32((0x2c18 + j), 0x00000000)r100_mm_wreg(rdev, ((0x2c18 + j)), (0x00000000), 0);
4151 WREG32((0x2c1c + j), 0x00000000)r100_mm_wreg(rdev, ((0x2c1c + j)), (0x00000000), 0);
4152 WREG32((0x2c20 + j), 0x00000000)r100_mm_wreg(rdev, ((0x2c20 + j)), (0x00000000), 0);
4153 WREG32((0x2c24 + j), 0x00000000)r100_mm_wreg(rdev, ((0x2c24 + j)), (0x00000000), 0);
4154 }
4155 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0)r100_mm_wreg(rdev, (0x54A0), (0), 0);
4156
4157 evergreen_mc_stop(rdev, &save);
4158 if (radeon_mc_wait_for_idle(rdev)(rdev)->asic->mc_wait_for_idle((rdev))) {
4159 dev_warn(rdev->dev, "Wait for MC idle timedout !\n")printf("drm:pid%d:%s *WARNING* " "Wait for MC idle timedout !\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4160 }
4161 if (!ASIC_IS_NODCE(rdev)((rdev->family == CHIP_HAINAN)))
4162 /* Lockout access through VGA aperture */
4163 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE)r100_mm_wreg(rdev, (0x328), ((1 << 4)), 0);
4164 /* Update configuration */
4165 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,r100_mm_wreg(rdev, (0x2034), (rdev->mc.vram_start >>
12), 0)
4166 rdev->mc.vram_start >> 12)r100_mm_wreg(rdev, (0x2034), (rdev->mc.vram_start >>
12), 0)
;
4167 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,r100_mm_wreg(rdev, (0x2038), (rdev->mc.vram_end >> 12
), 0)
4168 rdev->mc.vram_end >> 12)r100_mm_wreg(rdev, (0x2038), (rdev->mc.vram_end >> 12
), 0)
;
4169 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,r100_mm_wreg(rdev, (0x203C), (rdev->vram_scratch.gpu_addr >>
12), 0)
4170 rdev->vram_scratch.gpu_addr >> 12)r100_mm_wreg(rdev, (0x203C), (rdev->vram_scratch.gpu_addr >>
12), 0)
;
4171 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
4172 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
4173 WREG32(MC_VM_FB_LOCATION, tmp)r100_mm_wreg(rdev, (0x2024), (tmp), 0);
4174 /* XXX double check these! */
4175 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8))r100_mm_wreg(rdev, (0x2C04), ((rdev->mc.vram_start >>
8)), 0)
;
4176 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30))r100_mm_wreg(rdev, (0x2C08), ((2 << 7) | (1 << 30
)), 0)
;
4177 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF)r100_mm_wreg(rdev, (0x2C0C), (0x3FFFFFFF), 0);
4178 WREG32(MC_VM_AGP_BASE, 0)r100_mm_wreg(rdev, (0x2030), (0), 0);
4179 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF)r100_mm_wreg(rdev, (0x2028), (0x0FFFFFFF), 0);
4180 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF)r100_mm_wreg(rdev, (0x202C), (0x0FFFFFFF), 0);
4181 if (radeon_mc_wait_for_idle(rdev)(rdev)->asic->mc_wait_for_idle((rdev))) {
4182 dev_warn(rdev->dev, "Wait for MC idle timedout !\n")printf("drm:pid%d:%s *WARNING* " "Wait for MC idle timedout !\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4183 }
4184 evergreen_mc_resume(rdev, &save);
4185 if (!ASIC_IS_NODCE(rdev)((rdev->family == CHIP_HAINAN))) {
4186 /* we need to own VRAM, so turn off the VGA renderer here
4187 * to stop it overwriting our objects */
4188 rv515_vga_render_disable(rdev);
4189 }
4190}
4191
4192void si_vram_gtt_location(struct radeon_device *rdev,
4193 struct radeon_mc *mc)
4194{
4195 if (mc->mc_vram_size > 0xFFC0000000ULL) {
4196 /* leave room for at least 1024M GTT */
4197 dev_warn(rdev->dev, "limiting VRAM\n")printf("drm:pid%d:%s *WARNING* " "limiting VRAM\n", ({struct cpu_info
*__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof
(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_p->
ps_pid, __func__)
;
4198 mc->real_vram_size = 0xFFC0000000ULL;
4199 mc->mc_vram_size = 0xFFC0000000ULL;
4200 }
4201 radeon_vram_location(rdev, &rdev->mc, 0);
4202 rdev->mc.gtt_base_align = 0;
4203 radeon_gtt_location(rdev, mc);
4204}
4205
4206static int si_mc_init(struct radeon_device *rdev)
4207{
4208 u32 tmp;
4209 int chansize, numchan;
4210
4211 /* Get VRAM information */
4212 rdev->mc.vram_is_ddr = true1;
4213 tmp = RREG32(MC_ARB_RAMCFG)r100_mm_rreg(rdev, (0x2760), 0);
4214 if (tmp & CHANSIZE_OVERRIDE(1 << 11)) {
4215 chansize = 16;
4216 } else if (tmp & CHANSIZE_MASK0x00000100) {
4217 chansize = 64;
4218 } else {
4219 chansize = 32;
4220 }
4221 tmp = RREG32(MC_SHARED_CHMAP)r100_mm_rreg(rdev, (0x2004), 0);
4222 switch ((tmp & NOOFCHAN_MASK0x0000f000) >> NOOFCHAN_SHIFT12) {
4223 case 0:
4224 default:
4225 numchan = 1;
4226 break;
4227 case 1:
4228 numchan = 2;
4229 break;
4230 case 2:
4231 numchan = 4;
4232 break;
4233 case 3:
4234 numchan = 8;
4235 break;
4236 case 4:
4237 numchan = 3;
4238 break;
4239 case 5:
4240 numchan = 6;
4241 break;
4242 case 6:
4243 numchan = 10;
4244 break;
4245 case 7:
4246 numchan = 12;
4247 break;
4248 case 8:
4249 numchan = 16;
4250 break;
4251 }
4252 rdev->mc.vram_width = numchan * chansize;
4253 /* Could aper size report 0 ? */
4254 rdev->mc.aper_base = rdev->fb_aper_offset;
4255 rdev->mc.aper_size = rdev->fb_aper_size;
4256 /* size in MB on si */
4257 tmp = RREG32(CONFIG_MEMSIZE)r100_mm_rreg(rdev, (0x5428), 0);
4258 /* some boards may have garbage in the upper 16 bits */
4259 if (tmp & 0xffff0000) {
4260 DRM_INFO("Probable bad vram size: 0x%08x\n", tmp)printk("\0016" "[" "drm" "] " "Probable bad vram size: 0x%08x\n"
, tmp)
;
4261 if (tmp & 0xffff)
4262 tmp &= 0xffff;
4263 }
4264 rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
4265 rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
4266 rdev->mc.visible_vram_size = rdev->mc.aper_size;
4267 si_vram_gtt_location(rdev, &rdev->mc);
4268 radeon_update_bandwidth_info(rdev);
4269
4270 return 0;
4271}
4272
4273/*
4274 * GART
4275 */
4276void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
4277{
4278 /* flush hdp cache */
4279 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1)r100_mm_wreg(rdev, (0x5480), (0x1), 0);
4280
4281 /* bits 0-15 are the VM contexts 0-15 */
4282 WREG32(VM_INVALIDATE_REQUEST, 1)r100_mm_wreg(rdev, (0x1478), (1), 0);
4283}
4284
4285static int si_pcie_gart_enable(struct radeon_device *rdev)
4286{
4287 int r, i;
4288
4289 if (rdev->gart.robj == NULL((void *)0)) {
4290 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n")printf("drm:pid%d:%s *ERROR* " "No VRAM object for PCIE GART.\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4291 return -EINVAL22;
4292 }
4293 r = radeon_gart_table_vram_pin(rdev);
4294 if (r)
4295 return r;
4296 /* Setup TLB control */
4297 WREG32(MC_VM_MX_L1_TLB_CNTL,r100_mm_wreg(rdev, (0x2064), ((0xA << 7) | (1 << 0
) | (1 << 1) | (3 << 3) | (1 << 6) | (0 <<
5)), 0)
4298 (0xA << 7) |r100_mm_wreg(rdev, (0x2064), ((0xA << 7) | (1 << 0
) | (1 << 1) | (3 << 3) | (1 << 6) | (0 <<
5)), 0)
4299 ENABLE_L1_TLB |r100_mm_wreg(rdev, (0x2064), ((0xA << 7) | (1 << 0
) | (1 << 1) | (3 << 3) | (1 << 6) | (0 <<
5)), 0)
4300 ENABLE_L1_FRAGMENT_PROCESSING |r100_mm_wreg(rdev, (0x2064), ((0xA << 7) | (1 << 0
) | (1 << 1) | (3 << 3) | (1 << 6) | (0 <<
5)), 0)
4301 SYSTEM_ACCESS_MODE_NOT_IN_SYS |r100_mm_wreg(rdev, (0x2064), ((0xA << 7) | (1 << 0
) | (1 << 1) | (3 << 3) | (1 << 6) | (0 <<
5)), 0)
4302 ENABLE_ADVANCED_DRIVER_MODEL |r100_mm_wreg(rdev, (0x2064), ((0xA << 7) | (1 << 0
) | (1 << 1) | (3 << 3) | (1 << 6) | (0 <<
5)), 0)
4303 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU)r100_mm_wreg(rdev, (0x2064), ((0xA << 7) | (1 << 0
) | (1 << 1) | (3 << 3) | (1 << 6) | (0 <<
5)), 0)
;
4304 /* Setup L2 cache */
4305 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |r100_mm_wreg(rdev, (0x1400), ((1 << 0) | (1 << 1)
| (1 << 9) | (1 << 10) | (((7) & 7) <<
15) | (((1) & 3) << 19)), 0)
4306 ENABLE_L2_FRAGMENT_PROCESSING |r100_mm_wreg(rdev, (0x1400), ((1 << 0) | (1 << 1)
| (1 << 9) | (1 << 10) | (((7) & 7) <<
15) | (((1) & 3) << 19)), 0)
4307 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |r100_mm_wreg(rdev, (0x1400), ((1 << 0) | (1 << 1)
| (1 << 9) | (1 << 10) | (((7) & 7) <<
15) | (((1) & 3) << 19)), 0)
4308 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |r100_mm_wreg(rdev, (0x1400), ((1 << 0) | (1 << 1)
| (1 << 9) | (1 << 10) | (((7) & 7) <<
15) | (((1) & 3) << 19)), 0)
4309 EFFECTIVE_L2_QUEUE_SIZE(7) |r100_mm_wreg(rdev, (0x1400), ((1 << 0) | (1 << 1)
| (1 << 9) | (1 << 10) | (((7) & 7) <<
15) | (((1) & 3) << 19)), 0)
4310 CONTEXT1_IDENTITY_ACCESS_MODE(1))r100_mm_wreg(rdev, (0x1400), ((1 << 0) | (1 << 1)
| (1 << 9) | (1 << 10) | (((7) & 7) <<
15) | (((1) & 3) << 19)), 0)
;
4311 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE)r100_mm_wreg(rdev, (0x1404), ((1 << 0) | (1 << 1)
), 0)
;
4312 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |r100_mm_wreg(rdev, (0x1408), ((1 << 20) | ((4) <<
0) | ((4) << 15)), 0)
4313 BANK_SELECT(4) |r100_mm_wreg(rdev, (0x1408), ((1 << 20) | ((4) <<
0) | ((4) << 15)), 0)
4314 L2_CACHE_BIGK_FRAGMENT_SIZE(4))r100_mm_wreg(rdev, (0x1408), ((1 << 20) | ((4) <<
0) | ((4) << 15)), 0)
;
4315 /* setup context0 */
4316 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12)r100_mm_wreg(rdev, (0x155c), (rdev->mc.gtt_start >> 12
), 0)
;
4317 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12)r100_mm_wreg(rdev, (0x157C), (rdev->mc.gtt_end >> 12
), 0)
;
4318 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12)r100_mm_wreg(rdev, (0x153c), (rdev->gart.table_addr >>
12), 0)
;
4319 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,r100_mm_wreg(rdev, (0x1518), ((u32)(rdev->dummy_page.addr >>
12)), 0)
4320 (u32)(rdev->dummy_page.addr >> 12))r100_mm_wreg(rdev, (0x1518), ((u32)(rdev->dummy_page.addr >>
12)), 0)
;
4321 WREG32(VM_CONTEXT0_CNTL2, 0)r100_mm_wreg(rdev, (0x1430), (0), 0);
4322 WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |r100_mm_wreg(rdev, (0x1410), (((1 << 0) | (((0) & 3
) << 1) | (1 << 4))), 0)
4323 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT))r100_mm_wreg(rdev, (0x1410), (((1 << 0) | (((0) & 3
) << 1) | (1 << 4))), 0)
;
4324
4325 WREG32(0x15D4, 0)r100_mm_wreg(rdev, (0x15D4), (0), 0);
4326 WREG32(0x15D8, 0)r100_mm_wreg(rdev, (0x15D8), (0), 0);
4327 WREG32(0x15DC, 0)r100_mm_wreg(rdev, (0x15DC), (0), 0);
4328
4329 /* empty context1-15 */
4330 /* set vm size, must be a multiple of 4 */
4331 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0)r100_mm_wreg(rdev, (0x1560), (0), 0);
4332 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1)r100_mm_wreg(rdev, (0x1580), (rdev->vm_manager.max_pfn - 1
), 0)
;
4333 /* Assign the pt base to something valid for now; the pts used for
4334 * the VMs are determined by the application and set up and assigned
4335 * on the fly in the vm part of radeon_gart.c
4336 */
4337 for (i = 1; i < 16; i++) {
4338 if (i < 8)
4339 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),r100_mm_wreg(rdev, (0x153c + (i << 2)), (rdev->vm_manager
.saved_table_addr[i]), 0)
4340 rdev->vm_manager.saved_table_addr[i])r100_mm_wreg(rdev, (0x153c + (i << 2)), (rdev->vm_manager
.saved_table_addr[i]), 0)
;
4341 else
4342 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),r100_mm_wreg(rdev, (0x1438 + ((i - 8) << 2)), (rdev->
vm_manager.saved_table_addr[i]), 0)
4343 rdev->vm_manager.saved_table_addr[i])r100_mm_wreg(rdev, (0x1438 + ((i - 8) << 2)), (rdev->
vm_manager.saved_table_addr[i]), 0)
;
4344 }
4345
4346 /* enable context1-15 */
4347 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,r100_mm_wreg(rdev, (0x151c), ((u32)(rdev->dummy_page.addr >>
12)), 0)
4348 (u32)(rdev->dummy_page.addr >> 12))r100_mm_wreg(rdev, (0x151c), ((u32)(rdev->dummy_page.addr >>
12)), 0)
;
4349 WREG32(VM_CONTEXT1_CNTL2, 4)r100_mm_wreg(rdev, (0x1434), (4), 0);
4350 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4351 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4352 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4353 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4354 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4355 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4356 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4357 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4358 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4359 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4360 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4361 READ_PROTECTION_FAULT_ENABLE_DEFAULT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4362 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
4363 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT)r100_mm_wreg(rdev, (0x1414), ((1 << 0) | (((1) & 3)
<< 1) | (((radeon_vm_block_size - 9) & 0xF) <<
24) | (1 << 3) | (1 << 4) | (1 << 6) | (1 <<
7) | (1 << 9) | (1 << 10) | (1 << 12) | (1
<< 13) | (1 << 15) | (1 << 16) | (1 <<
18) | (1 << 19)), 0)
;
4364
4365 si_pcie_gart_tlb_flush(rdev);
4366 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",printk("\0016" "[" "drm" "] " "PCIE GART of %uM enabled (table at 0x%016llX).\n"
, (unsigned)(rdev->mc.gtt_size >> 20), (unsigned long
long)rdev->gart.table_addr)
4367 (unsigned)(rdev->mc.gtt_size >> 20),printk("\0016" "[" "drm" "] " "PCIE GART of %uM enabled (table at 0x%016llX).\n"
, (unsigned)(rdev->mc.gtt_size >> 20), (unsigned long
long)rdev->gart.table_addr)
4368 (unsigned long long)rdev->gart.table_addr)printk("\0016" "[" "drm" "] " "PCIE GART of %uM enabled (table at 0x%016llX).\n"
, (unsigned)(rdev->mc.gtt_size >> 20), (unsigned long
long)rdev->gart.table_addr)
;
4369 rdev->gart.ready = true1;
4370 return 0;
4371}
4372
4373static void si_pcie_gart_disable(struct radeon_device *rdev)
4374{
4375 unsigned i;
4376
4377 for (i = 1; i < 16; ++i) {
4378 uint32_t reg;
4379 if (i < 8)
4380 reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR0x153c + (i << 2);
4381 else
4382 reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR0x1438 + ((i - 8) << 2);
4383 rdev->vm_manager.saved_table_addr[i] = RREG32(reg)r100_mm_rreg(rdev, (reg), 0);
4384 }
4385
4386 /* Disable all tables */
4387 WREG32(VM_CONTEXT0_CNTL, 0)r100_mm_wreg(rdev, (0x1410), (0), 0);
4388 WREG32(VM_CONTEXT1_CNTL, 0)r100_mm_wreg(rdev, (0x1414), (0), 0);
4389 /* Setup TLB control */
4390 WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |r100_mm_wreg(rdev, (0x2064), ((3 << 3) | (0 << 5)
), 0)
4391 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU)r100_mm_wreg(rdev, (0x2064), ((3 << 3) | (0 << 5)
), 0)
;
4392 /* Setup L2 cache */
4393 WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |r100_mm_wreg(rdev, (0x1400), ((1 << 9) | (1 << 10
) | (((7) & 7) << 15) | (((1) & 3) << 19)
), 0)
4394 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |r100_mm_wreg(rdev, (0x1400), ((1 << 9) | (1 << 10
) | (((7) & 7) << 15) | (((1) & 3) << 19)
), 0)
4395 EFFECTIVE_L2_QUEUE_SIZE(7) |r100_mm_wreg(rdev, (0x1400), ((1 << 9) | (1 << 10
) | (((7) & 7) << 15) | (((1) & 3) << 19)
), 0)
4396 CONTEXT1_IDENTITY_ACCESS_MODE(1))r100_mm_wreg(rdev, (0x1400), ((1 << 9) | (1 << 10
) | (((7) & 7) << 15) | (((1) & 3) << 19)
), 0)
;
4397 WREG32(VM_L2_CNTL2, 0)r100_mm_wreg(rdev, (0x1404), (0), 0);
4398 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |r100_mm_wreg(rdev, (0x1408), ((1 << 20) | ((0) <<
15)), 0)
4399 L2_CACHE_BIGK_FRAGMENT_SIZE(0))r100_mm_wreg(rdev, (0x1408), ((1 << 20) | ((0) <<
15)), 0)
;
4400 radeon_gart_table_vram_unpin(rdev);
4401}
4402
4403static void si_pcie_gart_fini(struct radeon_device *rdev)
4404{
4405 si_pcie_gart_disable(rdev);
4406 radeon_gart_table_vram_free(rdev);
4407 radeon_gart_fini(rdev);
4408}
4409
4410/* vm parser */
4411static bool_Bool si_vm_reg_valid(u32 reg)
4412{
4413 /* context regs are fine */
4414 if (reg >= 0x28000)
4415 return true1;
4416
4417 /* shader regs are also fine */
4418 if (reg >= 0xB000 && reg < 0xC000)
4419 return true1;
4420
4421 /* check config regs */
4422 switch (reg) {
4423 case GRBM_GFX_INDEX0x802C:
4424 case CP_STRMOUT_CNTL0x84FC:
4425 case VGT_VTX_VECT_EJECT_REG0x88B0:
4426 case VGT_CACHE_INVALIDATION0x88C4:
4427 case VGT_ESGS_RING_SIZE0x88C8:
4428 case VGT_GSVS_RING_SIZE0x88CC:
4429 case VGT_GS_VERTEX_REUSE0x88D4:
4430 case VGT_PRIMITIVE_TYPE0x8958:
4431 case VGT_INDEX_TYPE0x895C:
4432 case VGT_NUM_INDICES0x8970:
4433 case VGT_NUM_INSTANCES0x8974:
4434 case VGT_TF_RING_SIZE0x8988:
4435 case VGT_HS_OFFCHIP_PARAM0x89B0:
4436 case VGT_TF_MEMORY_BASE0x89B8:
4437 case PA_CL_ENHANCE0x8A14:
4438 case PA_SU_LINE_STIPPLE_VALUE0x8A60:
4439 case PA_SC_LINE_STIPPLE_STATE0x8B10:
4440 case PA_SC_ENHANCE0x8BF0:
4441 case SQC_CACHES0x8C08:
4442 case SPI_STATIC_THREAD_MGMT_10x90E0:
4443 case SPI_STATIC_THREAD_MGMT_20x90E4:
4444 case SPI_STATIC_THREAD_MGMT_30x90E8:
4445 case SPI_PS_MAX_WAVE_ID0x90EC:
4446 case SPI_CONFIG_CNTL0x9100:
4447 case SPI_CONFIG_CNTL_10x913C:
4448 case TA_CNTL_AUX0x9508:
4449 case TA_CS_BC_BASE_ADDR0x950C:
4450 return true1;
4451 default:
4452 DRM_ERROR("Invalid register 0x%x in CS\n", reg)__drm_err("Invalid register 0x%x in CS\n", reg);
4453 return false0;
4454 }
4455}
4456
4457static int si_vm_packet3_ce_check(struct radeon_device *rdev,
4458 u32 *ib, struct radeon_cs_packet *pkt)
4459{
4460 switch (pkt->opcode) {
4461 case PACKET3_NOP0x10:
4462 case PACKET3_SET_BASE0x11:
4463 case PACKET3_SET_CE_DE_COUNTERS0x89:
4464 case PACKET3_LOAD_CONST_RAM0x80:
4465 case PACKET3_WRITE_CONST_RAM0x81:
4466 case PACKET3_WRITE_CONST_RAM_OFFSET0x82:
4467 case PACKET3_DUMP_CONST_RAM0x83:
4468 case PACKET3_INCREMENT_CE_COUNTER0x84:
4469 case PACKET3_WAIT_ON_DE_COUNTER0x87:
4470 case PACKET3_CE_WRITE0x7F:
4471 break;
4472 default:
4473 DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode)__drm_err("Invalid CE packet3: 0x%x\n", pkt->opcode);
4474 return -EINVAL22;
4475 }
4476 return 0;
4477}
4478
4479static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
4480{
4481 u32 start_reg, reg, i;
4482 u32 command = ib[idx + 4];
4483 u32 info = ib[idx + 1];
4484 u32 idx_value = ib[idx];
4485 if (command & PACKET3_CP_DMA_CMD_SAS(1 << 26)) {
4486 /* src address space is register */
4487 if (((info & 0x60000000) >> 29) == 0) {
4488 start_reg = idx_value << 2;
4489 if (command & PACKET3_CP_DMA_CMD_SAIC(1 << 28)) {
4490 reg = start_reg;
4491 if (!si_vm_reg_valid(reg)) {
4492 DRM_ERROR("CP DMA Bad SRC register\n")__drm_err("CP DMA Bad SRC register\n");
4493 return -EINVAL22;
4494 }
4495 } else {
4496 for (i = 0; i < (command & 0x1fffff); i++) {
4497 reg = start_reg + (4 * i);
4498 if (!si_vm_reg_valid(reg)) {
4499 DRM_ERROR("CP DMA Bad SRC register\n")__drm_err("CP DMA Bad SRC register\n");
4500 return -EINVAL22;
4501 }
4502 }
4503 }
4504 }
4505 }
4506 if (command & PACKET3_CP_DMA_CMD_DAS(1 << 27)) {
4507 /* dst address space is register */
4508 if (((info & 0x00300000) >> 20) == 0) {
4509 start_reg = ib[idx + 2];
4510 if (command & PACKET3_CP_DMA_CMD_DAIC(1 << 29)) {
4511 reg = start_reg;
4512 if (!si_vm_reg_valid(reg)) {
4513 DRM_ERROR("CP DMA Bad DST register\n")__drm_err("CP DMA Bad DST register\n");
4514 return -EINVAL22;
4515 }
4516 } else {
4517 for (i = 0; i < (command & 0x1fffff); i++) {
4518 reg = start_reg + (4 * i);
4519 if (!si_vm_reg_valid(reg)) {
4520 DRM_ERROR("CP DMA Bad DST register\n")__drm_err("CP DMA Bad DST register\n");
4521 return -EINVAL22;
4522 }
4523 }
4524 }
4525 }
4526 }
4527 return 0;
4528}
4529
4530static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4531 u32 *ib, struct radeon_cs_packet *pkt)
4532{
4533 int r;
4534 u32 idx = pkt->idx + 1;
4535 u32 idx_value = ib[idx];
4536 u32 start_reg, end_reg, reg, i;
4537
4538 switch (pkt->opcode) {
4539 case PACKET3_NOP0x10:
4540 case PACKET3_SET_BASE0x11:
4541 case PACKET3_CLEAR_STATE0x12:
4542 case PACKET3_INDEX_BUFFER_SIZE0x13:
4543 case PACKET3_DISPATCH_DIRECT0x15:
4544 case PACKET3_DISPATCH_INDIRECT0x16:
4545 case PACKET3_ALLOC_GDS0x1B:
4546 case PACKET3_WRITE_GDS_RAM0x1C:
4547 case PACKET3_ATOMIC_GDS0x1D:
4548 case PACKET3_ATOMIC0x1E:
4549 case PACKET3_OCCLUSION_QUERY0x1F:
4550 case PACKET3_SET_PREDICATION0x20:
4551 case PACKET3_COND_EXEC0x22:
4552 case PACKET3_PRED_EXEC0x23:
4553 case PACKET3_DRAW_INDIRECT0x24:
4554 case PACKET3_DRAW_INDEX_INDIRECT0x25:
4555 case PACKET3_INDEX_BASE0x26:
4556 case PACKET3_DRAW_INDEX_20x27:
4557 case PACKET3_CONTEXT_CONTROL0x28:
4558 case PACKET3_INDEX_TYPE0x2A:
4559 case PACKET3_DRAW_INDIRECT_MULTI0x2C:
4560 case PACKET3_DRAW_INDEX_AUTO0x2D:
4561 case PACKET3_DRAW_INDEX_IMMD0x2E:
4562 case PACKET3_NUM_INSTANCES0x2F:
4563 case PACKET3_DRAW_INDEX_MULTI_AUTO0x30:
4564 case PACKET3_STRMOUT_BUFFER_UPDATE0x34:
4565 case PACKET3_DRAW_INDEX_OFFSET_20x35:
4566 case PACKET3_DRAW_INDEX_MULTI_ELEMENT0x36:
4567 case PACKET3_DRAW_INDEX_INDIRECT_MULTI0x38:
4568 case PACKET3_MPEG_INDEX0x3A:
4569 case PACKET3_WAIT_REG_MEM0x3C:
4570 case PACKET3_MEM_WRITE0x3D:
4571 case PACKET3_PFP_SYNC_ME0x42:
4572 case PACKET3_SURFACE_SYNC0x43:
4573 case PACKET3_EVENT_WRITE0x46:
4574 case PACKET3_EVENT_WRITE_EOP0x47:
4575 case PACKET3_EVENT_WRITE_EOS0x48:
4576 case PACKET3_SET_CONTEXT_REG0x69:
4577 case PACKET3_SET_CONTEXT_REG_INDIRECT0x73:
4578 case PACKET3_SET_SH_REG0x76:
4579 case PACKET3_SET_SH_REG_OFFSET0x77:
4580 case PACKET3_INCREMENT_DE_COUNTER0x85:
4581 case PACKET3_WAIT_ON_CE_COUNTER0x86:
4582 case PACKET3_WAIT_ON_AVAIL_BUFFER0x8A:
4583 case PACKET3_ME_WRITE0x7A:
4584 break;
4585 case PACKET3_COPY_DATA0x40:
4586 if ((idx_value & 0xf00) == 0) {
4587 reg = ib[idx + 3] * 4;
4588 if (!si_vm_reg_valid(reg))
4589 return -EINVAL22;
4590 }
4591 break;
4592 case PACKET3_WRITE_DATA0x37:
4593 if ((idx_value & 0xf00) == 0) {
4594 start_reg = ib[idx + 1] * 4;
4595 if (idx_value & 0x10000) {
4596 if (!si_vm_reg_valid(start_reg))
4597 return -EINVAL22;
4598 } else {
4599 for (i = 0; i < (pkt->count - 2); i++) {
4600 reg = start_reg + (4 * i);
4601 if (!si_vm_reg_valid(reg))
4602 return -EINVAL22;
4603 }
4604 }
4605 }
4606 break;
4607 case PACKET3_COND_WRITE0x45:
4608 if (idx_value & 0x100) {
4609 reg = ib[idx + 5] * 4;
4610 if (!si_vm_reg_valid(reg))
4611 return -EINVAL22;
4612 }
4613 break;
4614 case PACKET3_COPY_DW0x3B:
4615 if (idx_value & 0x2) {
4616 reg = ib[idx + 3] * 4;
4617 if (!si_vm_reg_valid(reg))
4618 return -EINVAL22;
4619 }
4620 break;
4621 case PACKET3_SET_CONFIG_REG0x68:
4622 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START0x00008000;
4623 end_reg = 4 * pkt->count + start_reg - 4;
4624 if ((start_reg < PACKET3_SET_CONFIG_REG_START0x00008000) ||
4625 (start_reg >= PACKET3_SET_CONFIG_REG_END0x0000b000) ||
4626 (end_reg >= PACKET3_SET_CONFIG_REG_END0x0000b000)) {
4627 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n")__drm_err("bad PACKET3_SET_CONFIG_REG\n");
4628 return -EINVAL22;
4629 }
4630 for (i = 0; i < pkt->count; i++) {
4631 reg = start_reg + (4 * i);
4632 if (!si_vm_reg_valid(reg))
4633 return -EINVAL22;
4634 }
4635 break;
4636 case PACKET3_CP_DMA0x41:
4637 r = si_vm_packet3_cp_dma_check(ib, idx);
4638 if (r)
4639 return r;
4640 break;
4641 default:
4642 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode)__drm_err("Invalid GFX packet3: 0x%x\n", pkt->opcode);
4643 return -EINVAL22;
4644 }
4645 return 0;
4646}
4647
4648static int si_vm_packet3_compute_check(struct radeon_device *rdev,
4649 u32 *ib, struct radeon_cs_packet *pkt)
4650{
4651 int r;
4652 u32 idx = pkt->idx + 1;
4653 u32 idx_value = ib[idx];
4654 u32 start_reg, reg, i;
4655
4656 switch (pkt->opcode) {
4657 case PACKET3_NOP0x10:
4658 case PACKET3_SET_BASE0x11:
4659 case PACKET3_CLEAR_STATE0x12:
4660 case PACKET3_DISPATCH_DIRECT0x15:
4661 case PACKET3_DISPATCH_INDIRECT0x16:
4662 case PACKET3_ALLOC_GDS0x1B:
4663 case PACKET3_WRITE_GDS_RAM0x1C:
4664 case PACKET3_ATOMIC_GDS0x1D:
4665 case PACKET3_ATOMIC0x1E:
4666 case PACKET3_OCCLUSION_QUERY0x1F:
4667 case PACKET3_SET_PREDICATION0x20:
4668 case PACKET3_COND_EXEC0x22:
4669 case PACKET3_PRED_EXEC0x23:
4670 case PACKET3_CONTEXT_CONTROL0x28:
4671 case PACKET3_STRMOUT_BUFFER_UPDATE0x34:
4672 case PACKET3_WAIT_REG_MEM0x3C:
4673 case PACKET3_MEM_WRITE0x3D:
4674 case PACKET3_PFP_SYNC_ME0x42:
4675 case PACKET3_SURFACE_SYNC0x43:
4676 case PACKET3_EVENT_WRITE0x46:
4677 case PACKET3_EVENT_WRITE_EOP0x47:
4678 case PACKET3_EVENT_WRITE_EOS0x48:
4679 case PACKET3_SET_CONTEXT_REG0x69:
4680 case PACKET3_SET_CONTEXT_REG_INDIRECT0x73:
4681 case PACKET3_SET_SH_REG0x76:
4682 case PACKET3_SET_SH_REG_OFFSET0x77:
4683 case PACKET3_INCREMENT_DE_COUNTER0x85:
4684 case PACKET3_WAIT_ON_CE_COUNTER0x86:
4685 case PACKET3_WAIT_ON_AVAIL_BUFFER0x8A:
4686 case PACKET3_ME_WRITE0x7A:
4687 break;
4688 case PACKET3_COPY_DATA0x40:
4689 if ((idx_value & 0xf00) == 0) {
4690 reg = ib[idx + 3] * 4;
4691 if (!si_vm_reg_valid(reg))
4692 return -EINVAL22;
4693 }
4694 break;
4695 case PACKET3_WRITE_DATA0x37:
4696 if ((idx_value & 0xf00) == 0) {
4697 start_reg = ib[idx + 1] * 4;
4698 if (idx_value & 0x10000) {
4699 if (!si_vm_reg_valid(start_reg))
4700 return -EINVAL22;
4701 } else {
4702 for (i = 0; i < (pkt->count - 2); i++) {
4703 reg = start_reg + (4 * i);
4704 if (!si_vm_reg_valid(reg))
4705 return -EINVAL22;
4706 }
4707 }
4708 }
4709 break;
4710 case PACKET3_COND_WRITE0x45:
4711 if (idx_value & 0x100) {
4712 reg = ib[idx + 5] * 4;
4713 if (!si_vm_reg_valid(reg))
4714 return -EINVAL22;
4715 }
4716 break;
4717 case PACKET3_COPY_DW0x3B:
4718 if (idx_value & 0x2) {
4719 reg = ib[idx + 3] * 4;
4720 if (!si_vm_reg_valid(reg))
4721 return -EINVAL22;
4722 }
4723 break;
4724 case PACKET3_CP_DMA0x41:
4725 r = si_vm_packet3_cp_dma_check(ib, idx);
4726 if (r)
4727 return r;
4728 break;
4729 default:
4730 DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode)__drm_err("Invalid Compute packet3: 0x%x\n", pkt->opcode);
4731 return -EINVAL22;
4732 }
4733 return 0;
4734}
4735
4736int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
4737{
4738 int ret = 0;
4739 u32 idx = 0, i;
4740 struct radeon_cs_packet pkt;
4741
4742 do {
4743 pkt.idx = idx;
4744 pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx])(((ib->ptr[idx]) >> 30) & 3);
4745 pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx])(((ib->ptr[idx]) >> 16) & 0x3FFF);
4746 pkt.one_reg_wr = 0;
4747 switch (pkt.type) {
4748 case RADEON_PACKET_TYPE00:
4749 dev_err(rdev->dev, "Packet0 not allowed!\n")printf("drm:pid%d:%s *ERROR* " "Packet0 not allowed!\n", ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})->ci_curproc->p_p->ps_pid, __func__)
;
4750 ret = -EINVAL22;
4751 break;
4752 case RADEON_PACKET_TYPE22:
4753 idx += 1;
4754 break;
4755 case RADEON_PACKET_TYPE33:
4756 pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx])(((ib->ptr[idx]) >> 8) & 0xFF);
4757 if (ib->is_const_ib)
4758 ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
4759 else {
4760 switch (ib->ring) {
4761 case RADEON_RING_TYPE_GFX_INDEX0:
4762 ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
4763 break;
4764 case CAYMAN_RING_TYPE_CP1_INDEX1:
4765 case CAYMAN_RING_TYPE_CP2_INDEX2:
4766 ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
4767 break;
4768 default:
4769 dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring)printf("drm:pid%d:%s *ERROR* " "Non-PM4 ring %d !\n", ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})->ci_curproc->p_p->ps_pid, __func__ , ib->ring)
;
4770 ret = -EINVAL22;
4771 break;
4772 }
4773 }
4774 idx += pkt.count + 2;
4775 break;
4776 default:
4777 dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type)printf("drm:pid%d:%s *ERROR* " "Unknown packet type %d !\n", (
{struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , pkt.type
)
;
4778 ret = -EINVAL22;
4779 break;
4780 }
4781 if (ret) {
4782 for (i = 0; i < ib->length_dw; i++) {
4783 if (i == idx)
4784 printk("\t0x%08x <---\n", ib->ptr[i]);
4785 else
4786 printk("\t0x%08x\n", ib->ptr[i]);
4787 }
4788 break;
4789 }
4790 } while (idx < ib->length_dw);
4791
4792 return ret;
4793}
4794
4795/*
4796 * vm
4797 */
4798int si_vm_init(struct radeon_device *rdev)
4799{
4800 /* number of VMs */
4801 rdev->vm_manager.nvm = 16;
4802 /* base offset of vram pages */
4803 rdev->vm_manager.vram_base_offset = 0;
4804
4805 return 0;
4806}
4807
4808void si_vm_fini(struct radeon_device *rdev)
4809{
4810}
4811
4812/**
4813 * si_vm_decode_fault - print human readable fault info
4814 *
4815 * @rdev: radeon_device pointer
4816 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4817 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
4818 *
4819 * Print human readable fault information (SI).
4820 */
4821static void si_vm_decode_fault(struct radeon_device *rdev,
4822 u32 status, u32 addr)
4823{
4824 u32 mc_id = (status & MEMORY_CLIENT_ID_MASK(0xff << 12)) >> MEMORY_CLIENT_ID_SHIFT12;
4825 u32 vmid = (status & FAULT_VMID_MASK(0xf << 25)) >> FAULT_VMID_SHIFT25;
4826 u32 protections = (status & PROTECTIONS_MASK(0xf << 0)) >> PROTECTIONS_SHIFT0;
4827 char *block;
4828
4829 if (rdev->family == CHIP_TAHITI) {
4830 switch (mc_id) {
4831 case 160:
4832 case 144:
4833 case 96:
4834 case 80:
4835 case 224:
4836 case 208:
4837 case 32:
4838 case 16:
4839 block = "CB";
4840 break;
4841 case 161:
4842 case 145:
4843 case 97:
4844 case 81:
4845 case 225:
4846 case 209:
4847 case 33:
4848 case 17:
4849 block = "CB_FMASK";
4850 break;
4851 case 162:
4852 case 146:
4853 case 98:
4854 case 82:
4855 case 226:
4856 case 210:
4857 case 34:
4858 case 18:
4859 block = "CB_CMASK";
4860 break;
4861 case 163:
4862 case 147:
4863 case 99:
4864 case 83:
4865 case 227:
4866 case 211:
4867 case 35:
4868 case 19:
4869 block = "CB_IMMED";
4870 break;
4871 case 164:
4872 case 148:
4873 case 100:
4874 case 84:
4875 case 228:
4876 case 212:
4877 case 36:
4878 case 20:
4879 block = "DB";
4880 break;
4881 case 165:
4882 case 149:
4883 case 101:
4884 case 85:
4885 case 229:
4886 case 213:
4887 case 37:
4888 case 21:
4889 block = "DB_HTILE";
4890 break;
4891 case 167:
4892 case 151:
4893 case 103:
4894 case 87:
4895 case 231:
4896 case 215:
4897 case 39:
4898 case 23:
4899 block = "DB_STEN";
4900 break;
4901 case 72:
4902 case 68:
4903 case 64:
4904 case 8:
4905 case 4:
4906 case 0:
4907 case 136:
4908 case 132:
4909 case 128:
4910 case 200:
4911 case 196:
4912 case 192:
4913 block = "TC";
4914 break;
4915 case 112:
4916 case 48:
4917 block = "CP";
4918 break;
4919 case 49:
4920 case 177:
4921 case 50:
4922 case 178:
4923 block = "SH";
4924 break;
4925 case 53:
4926 case 190:
4927 block = "VGT";
4928 break;
4929 case 117:
4930 block = "IH";
4931 break;
4932 case 51:
4933 case 115:
4934 block = "RLC";
4935 break;
4936 case 119:
4937 case 183:
4938 block = "DMA0";
4939 break;
4940 case 61:
4941 block = "DMA1";
4942 break;
4943 case 248:
4944 case 120:
4945 block = "HDP";
4946 break;
4947 default:
4948 block = "unknown";
4949 break;
4950 }
4951 } else {
4952 switch (mc_id) {
4953 case 32:
4954 case 16:
4955 case 96:
4956 case 80:
4957 case 160:
4958 case 144:
4959 case 224:
4960 case 208:
4961 block = "CB";
4962 break;
4963 case 33:
4964 case 17:
4965 case 97:
4966 case 81:
4967 case 161:
4968 case 145:
4969 case 225:
4970 case 209:
4971 block = "CB_FMASK";
4972 break;
4973 case 34:
4974 case 18:
4975 case 98:
4976 case 82:
4977 case 162:
4978 case 146:
4979 case 226:
4980 case 210:
4981 block = "CB_CMASK";
4982 break;
4983 case 35:
4984 case 19:
4985 case 99:
4986 case 83:
4987 case 163:
4988 case 147:
4989 case 227:
4990 case 211:
4991 block = "CB_IMMED";
4992 break;
4993 case 36:
4994 case 20:
4995 case 100:
4996 case 84:
4997 case 164:
4998 case 148:
4999 case 228:
5000 case 212:
5001 block = "DB";
5002 break;
5003 case 37:
5004 case 21:
5005 case 101:
5006 case 85:
5007 case 165:
5008 case 149:
5009 case 229:
5010 case 213:
5011 block = "DB_HTILE";
5012 break;
5013 case 39:
5014 case 23:
5015 case 103:
5016 case 87:
5017 case 167:
5018 case 151:
5019 case 231:
5020 case 215:
5021 block = "DB_STEN";
5022 break;
5023 case 72:
5024 case 68:
5025 case 8:
5026 case 4:
5027 case 136:
5028 case 132:
5029 case 200:
5030 case 196:
5031 block = "TC";
5032 break;
5033 case 112:
5034 case 48:
5035 block = "CP";
5036 break;
5037 case 49:
5038 case 177:
5039 case 50:
5040 case 178:
5041 block = "SH";
5042 break;
5043 case 53:
5044 block = "VGT";
5045 break;
5046 case 117:
5047 block = "IH";
5048 break;
5049 case 51:
5050 case 115:
5051 block = "RLC";
5052 break;
5053 case 119:
5054 case 183:
5055 block = "DMA0";
5056 break;
5057 case 61:
5058 block = "DMA1";
5059 break;
5060 case 248:
5061 case 120:
5062 block = "HDP";
5063 break;
5064 default:
5065 block = "unknown";
5066 break;
5067 }
5068 }
5069
5070 printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
5071 protections, vmid, addr,
5072 (status & MEMORY_CLIENT_RW_MASK(1 << 24)) ? "write" : "read",
5073 block, mc_id);
5074}
5075
5076void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
5077 unsigned vm_id, uint64_t pd_addr)
5078{
5079 /* write new base address */
5080 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)((3 << 30) | (((0x37) & 0xFF) << 8) | ((3) &
0x3FFF) << 16)
);
5081 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1)((1) << 30) |
5082 WRITE_DATA_DST_SEL(0)((0) << 8)));
5083
5084 if (vm_id < 8) {
5085 radeon_ring_write(ring,
5086 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR0x153c + (vm_id << 2)) >> 2);
5087 } else {
5088 radeon_ring_write(ring,
5089 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR0x1438 + ((vm_id - 8) << 2)) >> 2);
5090 }
5091 radeon_ring_write(ring, 0);
5092 radeon_ring_write(ring, pd_addr >> 12);
5093
5094 /* flush hdp cache */
5095 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)((3 << 30) | (((0x37) & 0xFF) << 8) | ((3) &
0x3FFF) << 16)
);
5096 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1)((1) << 30) |
5097 WRITE_DATA_DST_SEL(0)((0) << 8)));
5098 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL0x5480 >> 2);
5099 radeon_ring_write(ring, 0);
5100 radeon_ring_write(ring, 0x1);
5101
5102 /* bits 0-15 are the VM contexts 0-15 */
5103 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)((3 << 30) | (((0x37) & 0xFF) << 8) | ((3) &
0x3FFF) << 16)
);
5104 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1)((1) << 30) |
5105 WRITE_DATA_DST_SEL(0)((0) << 8)));
5106 radeon_ring_write(ring, VM_INVALIDATE_REQUEST0x1478 >> 2);
5107 radeon_ring_write(ring, 0);
5108 radeon_ring_write(ring, 1 << vm_id);
5109
5110 /* wait for the invalidate to complete */
5111 radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)((3 << 30) | (((0x3C) & 0xFF) << 8) | ((5) &
0x3FFF) << 16)
);
5112 radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0)((0) << 0) | /* always */
5113 WAIT_REG_MEM_ENGINE(0)((0) << 8))); /* me */
5114 radeon_ring_write(ring, VM_INVALIDATE_REQUEST0x1478 >> 2);
5115 radeon_ring_write(ring, 0);
5116 radeon_ring_write(ring, 0); /* ref */
5117 radeon_ring_write(ring, 0); /* mask */
5118 radeon_ring_write(ring, 0x20); /* poll interval */
5119
5120 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5121 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)((3 << 30) | (((0x42) & 0xFF) << 8) | ((0) &
0x3FFF) << 16)
);
5122 radeon_ring_write(ring, 0x0);
5123}
5124
5125/*
5126 * Power and clock gating
5127 */
5128static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
5129{
5130 int i;
5131
5132 for (i = 0; i < rdev->usec_timeout; i++) {
5133 if (RREG32(RLC_SERDES_MASTER_BUSY_0)r100_mm_rreg(rdev, (0xC464), 0) == 0)
5134 break;
5135 udelay(1);
5136 }
5137
5138 for (i = 0; i < rdev->usec_timeout; i++) {
5139 if (RREG32(RLC_SERDES_MASTER_BUSY_1)r100_mm_rreg(rdev, (0xC468), 0) == 0)
5140 break;
5141 udelay(1);
5142 }
5143}
5144
5145static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
5146 bool_Bool enable)
5147{
5148 u32 tmp = RREG32(CP_INT_CNTL_RING0)r100_mm_rreg(rdev, (0xC1A8), 0);
5149 u32 mask;
5150 int i;
5151
5152 if (enable)
5153 tmp |= (CNTX_BUSY_INT_ENABLE(1 << 19) | CNTX_EMPTY_INT_ENABLE(1 << 20));
5154 else
5155 tmp &= ~(CNTX_BUSY_INT_ENABLE(1 << 19) | CNTX_EMPTY_INT_ENABLE(1 << 20));
5156 WREG32(CP_INT_CNTL_RING0, tmp)r100_mm_wreg(rdev, (0xC1A8), (tmp), 0);
5157
5158 if (!enable) {
5159 /* read a gfx register */
5160 tmp = RREG32(DB_DEPTH_INFO)r100_mm_rreg(rdev, (0x2803c), 0);
5161
5162 mask = RLC_BUSY_STATUS(1 << 0) | GFX_POWER_STATUS(1 << 1) | GFX_CLOCK_STATUS(1 << 2) | GFX_LS_STATUS(1 << 3);
5163 for (i = 0; i < rdev->usec_timeout; i++) {
5164 if ((RREG32(RLC_STAT)r100_mm_rreg(rdev, (0xC34C), 0) & mask) == (GFX_CLOCK_STATUS(1 << 2) | GFX_POWER_STATUS(1 << 1)))
5165 break;
5166 udelay(1);
5167 }
5168 }
5169}
5170
5171static void si_set_uvd_dcm(struct radeon_device *rdev,
5172 bool_Bool sw_mode)
5173{
5174 u32 tmp, tmp2;
5175
5176 tmp = RREG32(UVD_CGC_CTRL)r100_mm_rreg(rdev, (0xF4B0), 0);
5177 tmp &= ~(CLK_OD_MASK(0x1f << 6) | CG_DT_MASK(0xf << 2));
5178 tmp |= DCM(1 << 0) | CG_DT(1)((1) << 2) | CLK_OD(4)((4) << 6);
5179
5180 if (sw_mode) {
5181 tmp &= ~0x7ffff800;
5182 tmp2 = DYN_OR_EN(1 << 0) | DYN_RR_EN(1 << 1) | G_DIV_ID(7)((7) << 2);
5183 } else {
5184 tmp |= 0x7ffff800;
5185 tmp2 = 0;
5186 }
5187
5188 WREG32(UVD_CGC_CTRL, tmp)r100_mm_wreg(rdev, (0xF4B0), (tmp), 0);
5189 WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2)r600_uvd_ctx_wreg(rdev, (0xC1), (tmp2));
5190}
5191
5192void si_init_uvd_internal_cg(struct radeon_device *rdev)
5193{
5194 bool_Bool hw_mode = true1;
5195
5196 if (hw_mode) {
5197 si_set_uvd_dcm(rdev, false0);
5198 } else {
5199 u32 tmp = RREG32(UVD_CGC_CTRL)r100_mm_rreg(rdev, (0xF4B0), 0);
5200 tmp &= ~DCM(1 << 0);
5201 WREG32(UVD_CGC_CTRL, tmp)r100_mm_wreg(rdev, (0xF4B0), (tmp), 0);
5202 }
5203}
5204
5205static u32 si_halt_rlc(struct radeon_device *rdev)
5206{
5207 u32 data, orig;
5208
5209 orig = data = RREG32(RLC_CNTL)r100_mm_rreg(rdev, (0xC300), 0);
5210
5211 if (data & RLC_ENABLE(1 << 0)) {
5212 data &= ~RLC_ENABLE(1 << 0);
5213 WREG32(RLC_CNTL, data)r100_mm_wreg(rdev, (0xC300), (data), 0);
5214
5215 si_wait_for_rlc_serdes(rdev);
5216 }
5217
5218 return orig;
5219}
5220
5221static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
5222{
5223 u32 tmp;
5224
5225 tmp = RREG32(RLC_CNTL)r100_mm_rreg(rdev, (0xC300), 0);
5226 if (tmp != rlc)
5227 WREG32(RLC_CNTL, rlc)r100_mm_wreg(rdev, (0xC300), (rlc), 0);
5228}
5229
5230static void si_enable_dma_pg(struct radeon_device *rdev, bool_Bool enable)
5231{
5232 u32 data, orig;
5233
5234 orig = data = RREG32(DMA_PG)r100_mm_rreg(rdev, (0xd0d4), 0);
5235 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA(1 << 8)))
5236 data |= PG_CNTL_ENABLE(1 << 0);
5237 else
5238 data &= ~PG_CNTL_ENABLE(1 << 0);
5239 if (orig != data)
5240 WREG32(DMA_PG, data)r100_mm_wreg(rdev, (0xd0d4), (data), 0);
5241}
5242
5243static void si_init_dma_pg(struct radeon_device *rdev)
5244{
5245 u32 tmp;
5246
5247 WREG32(DMA_PGFSM_WRITE, 0x00002000)r100_mm_wreg(rdev, (0xd0dc), (0x00002000), 0);
5248 WREG32(DMA_PGFSM_CONFIG, 0x100010ff)r100_mm_wreg(rdev, (0xd0d8), (0x100010ff), 0);
5249
5250 for (tmp = 0; tmp < 5; tmp++)
5251 WREG32(DMA_PGFSM_WRITE, 0)r100_mm_wreg(rdev, (0xd0dc), (0), 0);
5252}
5253
5254static void si_enable_gfx_cgpg(struct radeon_device *rdev,
5255 bool_Bool enable)
5256{
5257 u32 tmp;
5258
5259 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG(1 << 0))) {
5260 tmp = RLC_PUD(0x10)((0x10) << 0) | RLC_PDD(0x10)((0x10) << 8) | RLC_TTPD(0x10)((0x10) << 16) | RLC_MSD(0x10)((0x10) << 24);
5261 WREG32(RLC_TTOP_D, tmp)r100_mm_wreg(rdev, (0xC414), (tmp), 0);
5262
5263 tmp = RREG32(RLC_PG_CNTL)r100_mm_rreg(rdev, (0xC35C), 0);
5264 tmp |= GFX_PG_ENABLE(1 << 0);
5265 WREG32(RLC_PG_CNTL, tmp)r100_mm_wreg(rdev, (0xC35C), (tmp), 0);
5266
5267 tmp = RREG32(RLC_AUTO_PG_CTRL)r100_mm_rreg(rdev, (0xC434), 0);
5268 tmp |= AUTO_PG_EN(1 << 0);
5269 WREG32(RLC_AUTO_PG_CTRL, tmp)r100_mm_wreg(rdev, (0xC434), (tmp), 0);
5270 } else {
5271 tmp = RREG32(RLC_AUTO_PG_CTRL)r100_mm_rreg(rdev, (0xC434), 0);
5272 tmp &= ~AUTO_PG_EN(1 << 0);
5273 WREG32(RLC_AUTO_PG_CTRL, tmp)r100_mm_wreg(rdev, (0xC434), (tmp), 0);
5274
5275 tmp = RREG32(DB_RENDER_CONTROL)r100_mm_rreg(rdev, (0x28000), 0);
5276 }
5277}
5278
5279static void si_init_gfx_cgpg(struct radeon_device *rdev)
5280{
5281 u32 tmp;
5282
5283 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8)r100_mm_wreg(rdev, (0xC310), (rdev->rlc.save_restore_gpu_addr
>> 8), 0)
;
5284
5285 tmp = RREG32(RLC_PG_CNTL)r100_mm_rreg(rdev, (0xC35C), 0);
5286 tmp |= GFX_PG_SRC(1 << 1);
5287 WREG32(RLC_PG_CNTL, tmp)r100_mm_wreg(rdev, (0xC35C), (tmp), 0);
5288
5289 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8)r100_mm_wreg(rdev, (0xC320), (rdev->rlc.clear_state_gpu_addr
>> 8), 0)
;
5290
5291 tmp = RREG32(RLC_AUTO_PG_CTRL)r100_mm_rreg(rdev, (0xC434), 0);
5292
5293 tmp &= ~GRBM_REG_SGIT_MASK(0xffff << 3);
5294 tmp |= GRBM_REG_SGIT(0x700)((0x700) << 3);
5295 tmp &= ~PG_AFTER_GRBM_REG_ST_MASK(0x1fff << 19);
5296 WREG32(RLC_AUTO_PG_CTRL, tmp)r100_mm_wreg(rdev, (0xC434), (tmp), 0);
5297}
5298
5299static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
5300{
5301 u32 mask = 0, tmp, tmp1;
5302 int i;
5303
5304 si_select_se_sh(rdev, se, sh);
5305 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG)r100_mm_rreg(rdev, (0x89bc), 0);
5306 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG)r100_mm_rreg(rdev, (0x89c0), 0);
5307 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5308
5309 tmp &= 0xffff0000;
5310
5311 tmp |= tmp1;
5312 tmp >>= 16;
5313
5314 for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) {
5315 mask <<= 1;
5316 mask |= 1;
5317 }
5318
5319 return (~tmp) & mask;
5320}
5321
5322static void si_init_ao_cu_mask(struct radeon_device *rdev)
5323{
5324 u32 i, j, k, active_cu_number = 0;
5325 u32 mask, counter, cu_bitmap;
5326 u32 tmp = 0;
5327
5328 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
5329 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
5330 mask = 1;
5331 cu_bitmap = 0;
5332 counter = 0;
5333 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
5334 if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
5335 if (counter < 2)
5336 cu_bitmap |= mask;
5337 counter++;
5338 }
5339 mask <<= 1;
5340 }
5341
5342 active_cu_number += counter;
5343 tmp |= (cu_bitmap << (i * 16 + j * 8));
5344 }
5345 }
5346
5347 WREG32(RLC_PG_AO_CU_MASK, tmp)r100_mm_wreg(rdev, (0xC42C), (tmp), 0);
5348
5349 tmp = RREG32(RLC_MAX_PG_CU)r100_mm_rreg(rdev, (0xC430), 0);
5350 tmp &= ~MAX_PU_CU_MASK(0xff << 0);
5351 tmp |= MAX_PU_CU(active_cu_number)((active_cu_number) << 0);
5352 WREG32(RLC_MAX_PG_CU, tmp)r100_mm_wreg(rdev, (0xC430), (tmp), 0);
5353}
5354
5355static void si_enable_cgcg(struct radeon_device *rdev,
5356 bool_Bool enable)
5357{
5358 u32 data, orig, tmp;
5359
5360 orig = data = RREG32(RLC_CGCG_CGLS_CTRL)r100_mm_rreg(rdev, (0xC404), 0);
5361
5362 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG(1 << 2))) {
5363 si_enable_gui_idle_interrupt(rdev, true1);
5364
5365 WREG32(RLC_GCPM_GENERAL_3, 0x00000080)r100_mm_wreg(rdev, (0xC478), (0x00000080), 0);
5366
5367 tmp = si_halt_rlc(rdev);
5368
5369 WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff)r100_mm_wreg(rdev, (0xC454), (0xffffffff), 0);
5370 WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff)r100_mm_wreg(rdev, (0xC458), (0xffffffff), 0);
5371 WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff)r100_mm_wreg(rdev, (0xC45C), (0x00b000ff), 0);
5372
5373 si_wait_for_rlc_serdes(rdev);
5374
5375 si_update_rlc(rdev, tmp);
5376
5377 WREG32(RLC_SERDES_WR_CTRL, 0x007000ff)r100_mm_wreg(rdev, (0xC45C), (0x007000ff), 0);
5378
5379 data |= CGCG_EN(1 << 0) | CGLS_EN(1 << 1);
5380 } else {
5381 si_enable_gui_idle_interrupt(rdev, false0);
5382
5383 RREG32(CB_CGTT_SCLK_CTRL)r100_mm_rreg(rdev, (0x9a60), 0);
5384 RREG32(CB_CGTT_SCLK_CTRL)r100_mm_rreg(rdev, (0x9a60), 0);
5385 RREG32(CB_CGTT_SCLK_CTRL)r100_mm_rreg(rdev, (0x9a60), 0);
5386 RREG32(CB_CGTT_SCLK_CTRL)r100_mm_rreg(rdev, (0x9a60), 0);
5387
5388 data &= ~(CGCG_EN(1 << 0) | CGLS_EN(1 << 1));
5389 }
5390
5391 if (orig != data)
5392 WREG32(RLC_CGCG_CGLS_CTRL, data)r100_mm_wreg(rdev, (0xC404), (data), 0);
5393}
5394
5395static void si_enable_mgcg(struct radeon_device *rdev,
5396 bool_Bool enable)
5397{
5398 u32 data, orig, tmp = 0;
5399
5400 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG(1 << 0))) {
5401 orig = data = RREG32(CGTS_SM_CTRL_REG)r100_mm_rreg(rdev, (0x9150), 0);
5402 data = 0x96940200;
5403 if (orig != data)
5404 WREG32(CGTS_SM_CTRL_REG, data)r100_mm_wreg(rdev, (0x9150), (data), 0);
5405
5406 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS(1 << 6)) {
5407 orig = data = RREG32(CP_MEM_SLP_CNTL)r100_mm_rreg(rdev, (0xC1E4), 0);
5408 data |= CP_MEM_LS_EN(1 << 0);
5409 if (orig != data)
5410 WREG32(CP_MEM_SLP_CNTL, data)r100_mm_wreg(rdev, (0xC1E4), (data), 0);
5411 }
5412
5413 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE)r100_mm_rreg(rdev, (0xC400), 0);
5414 data &= 0xffffffc0;
5415 if (orig != data)
5416 WREG32(RLC_CGTT_MGCG_OVERRIDE, data)r100_mm_wreg(rdev, (0xC400), (data), 0);
5417
5418 tmp = si_halt_rlc(rdev);
5419
5420 WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff)r100_mm_wreg(rdev, (0xC454), (0xffffffff), 0);
5421 WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff)r100_mm_wreg(rdev, (0xC458), (0xffffffff), 0);
5422 WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff)r100_mm_wreg(rdev, (0xC45C), (0x00d000ff), 0);
5423
5424 si_update_rlc(rdev, tmp);
5425 } else {
5426 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE)r100_mm_rreg(rdev, (0xC400), 0);
5427 data |= 0x00000003;
5428 if (orig != data)
5429 WREG32(RLC_CGTT_MGCG_OVERRIDE, data)r100_mm_wreg(rdev, (0xC400), (data), 0);
5430
5431 data = RREG32(CP_MEM_SLP_CNTL)r100_mm_rreg(rdev, (0xC1E4), 0);
5432 if (data & CP_MEM_LS_EN(1 << 0)) {
5433 data &= ~CP_MEM_LS_EN(1 << 0);
5434 WREG32(CP_MEM_SLP_CNTL, data)r100_mm_wreg(rdev, (0xC1E4), (data), 0);
5435 }
5436 orig = data = RREG32(CGTS_SM_CTRL_REG)r100_mm_rreg(rdev, (0x9150), 0);
5437 data |= LS_OVERRIDE(1 << 22) | OVERRIDE(1 << 21);
5438 if (orig != data)
5439 WREG32(CGTS_SM_CTRL_REG, data)r100_mm_wreg(rdev, (0x9150), (data), 0);
5440
5441 tmp = si_halt_rlc(rdev);
5442
5443 WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff)r100_mm_wreg(rdev, (0xC454), (0xffffffff), 0);
5444 WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff)r100_mm_wreg(rdev, (0xC458), (0xffffffff), 0);
5445 WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff)r100_mm_wreg(rdev, (0xC45C), (0x00e000ff), 0);
5446
5447 si_update_rlc(rdev, tmp);
5448 }
5449}
5450
5451static void si_enable_uvd_mgcg(struct radeon_device *rdev,
5452 bool_Bool enable)
5453{
5454 u32 orig, data, tmp;
5455
5456 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG(1 << 13))) {
5457 tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL)r600_uvd_ctx_rreg(rdev, (0xC0));
5458 tmp |= 0x3fff;
5459 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp)r600_uvd_ctx_wreg(rdev, (0xC0), (tmp));
5460
5461 orig = data = RREG32(UVD_CGC_CTRL)r100_mm_rreg(rdev, (0xF4B0), 0);
5462 data |= DCM(1 << 0);
5463 if (orig != data)
5464 WREG32(UVD_CGC_CTRL, data)r100_mm_wreg(rdev, (0xF4B0), (data), 0);
5465
5466 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0)tn_smc_wreg(rdev, (0xc0030000 + 0x400), (0));
5467 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0)tn_smc_wreg(rdev, (0xc0030000 + 0x401), (0));
5468 } else {
5469 tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL)r600_uvd_ctx_rreg(rdev, (0xC0));
5470 tmp &= ~0x3fff;
5471 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp)r600_uvd_ctx_wreg(rdev, (0xC0), (tmp));
5472
5473 orig = data = RREG32(UVD_CGC_CTRL)r100_mm_rreg(rdev, (0xF4B0), 0);
5474 data &= ~DCM(1 << 0);
5475 if (orig != data)
5476 WREG32(UVD_CGC_CTRL, data)r100_mm_wreg(rdev, (0xF4B0), (data), 0);
5477
5478 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff)tn_smc_wreg(rdev, (0xc0030000 + 0x400), (0xffffffff));
5479 WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff)tn_smc_wreg(rdev, (0xc0030000 + 0x401), (0xffffffff));
5480 }
5481}
5482
5483static const u32 mc_cg_registers[] =
5484{
5485 MC_HUB_MISC_HUB_CG0x20b8,
5486 MC_HUB_MISC_SIP_CG0x20c0,
5487 MC_HUB_MISC_VM_CG0x20bc,
5488 MC_XPB_CLK_GAT0x2478,
5489 ATC_MISC_CG0x3350,
5490 MC_CITF_MISC_WR_CG0x264c,
5491 MC_CITF_MISC_RD_CG0x2648,
5492 MC_CITF_MISC_VM_CG0x2650,
5493 VM_L2_CG0x15c0,
5494};
5495
5496static void si_enable_mc_ls(struct radeon_device *rdev,
5497 bool_Bool enable)
5498{
5499 int i;
5500 u32 orig, data;
5501
5502 for (i = 0; i < ARRAY_SIZE(mc_cg_registers)(sizeof((mc_cg_registers)) / sizeof((mc_cg_registers)[0])); i++) {
5503 orig = data = RREG32(mc_cg_registers[i])r100_mm_rreg(rdev, (mc_cg_registers[i]), 0);
5504 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS(1 << 8)))
5505 data |= MC_LS_ENABLE(1 << 19);
5506 else
5507 data &= ~MC_LS_ENABLE(1 << 19);
5508 if (data != orig)
5509 WREG32(mc_cg_registers[i], data)r100_mm_wreg(rdev, (mc_cg_registers[i]), (data), 0);
5510 }
5511}
5512
5513static void si_enable_mc_mgcg(struct radeon_device *rdev,
5514 bool_Bool enable)
5515{
5516 int i;
5517 u32 orig, data;
5518
5519 for (i = 0; i < ARRAY_SIZE(mc_cg_registers)(sizeof((mc_cg_registers)) / sizeof((mc_cg_registers)[0])); i++) {
5520 orig = data = RREG32(mc_cg_registers[i])r100_mm_rreg(rdev, (mc_cg_registers[i]), 0);
5521 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG(1 << 9)))
5522 data |= MC_CG_ENABLE(1 << 18);
5523 else
5524 data &= ~MC_CG_ENABLE(1 << 18);
5525 if (data != orig)
5526 WREG32(mc_cg_registers[i], data)r100_mm_wreg(rdev, (mc_cg_registers[i]), (data), 0);
5527 }
5528}
5529
5530static void si_enable_dma_mgcg(struct radeon_device *rdev,
5531 bool_Bool enable)
5532{
5533 u32 orig, data, offset;
5534 int i;
5535
5536 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG(1 << 11))) {
5537 for (i = 0; i < 2; i++) {
5538 if (i == 0)
5539 offset = DMA0_REGISTER_OFFSET0x0;
5540 else
5541 offset = DMA1_REGISTER_OFFSET0x800;
5542 orig = data = RREG32(DMA_POWER_CNTL + offset)r100_mm_rreg(rdev, (0xd0bc + offset), 0);
5543 data &= ~MEM_POWER_OVERRIDE(1 << 8);
5544 if (data != orig)
5545 WREG32(DMA_POWER_CNTL + offset, data)r100_mm_wreg(rdev, (0xd0bc + offset), (data), 0);
5546 WREG32(DMA_CLK_CTRL + offset, 0x00000100)r100_mm_wreg(rdev, (0xd0c0 + offset), (0x00000100), 0);
5547 }
5548 } else {
5549 for (i = 0; i < 2; i++) {
5550 if (i == 0)
5551 offset = DMA0_REGISTER_OFFSET0x0;
5552 else
5553 offset = DMA1_REGISTER_OFFSET0x800;
5554 orig = data = RREG32(DMA_POWER_CNTL + offset)r100_mm_rreg(rdev, (0xd0bc + offset), 0);
5555 data |= MEM_POWER_OVERRIDE(1 << 8);
5556 if (data != orig)
5557 WREG32(DMA_POWER_CNTL + offset, data)r100_mm_wreg(rdev, (0xd0bc + offset), (data), 0);
5558
5559 orig = data = RREG32(DMA_CLK_CTRL + offset)r100_mm_rreg(rdev, (0xd0c0 + offset), 0);
5560 data = 0xff000000;
5561 if (data != orig)
5562 WREG32(DMA_CLK_CTRL + offset, data)r100_mm_wreg(rdev, (0xd0c0 + offset), (data), 0);
5563 }
5564 }
5565}
5566
5567static void si_enable_bif_mgls(struct radeon_device *rdev,
5568 bool_Bool enable)
5569{
5570 u32 orig, data;
5571
5572 orig = data = RREG32_PCIE(PCIE_CNTL2)rv370_pcie_rreg(rdev, (0x1c));
5573
5574 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS(1 << 12)))
5575 data |= SLV_MEM_LS_EN(1 << 16) | MST_MEM_LS_EN(1 << 18) |
5576 REPLAY_MEM_LS_EN(1 << 19) | SLV_MEM_AGGRESSIVE_LS_EN(1 << 17);
5577 else
5578 data &= ~(SLV_MEM_LS_EN(1 << 16) | MST_MEM_LS_EN(1 << 18) |
5579 REPLAY_MEM_LS_EN(1 << 19) | SLV_MEM_AGGRESSIVE_LS_EN(1 << 17));
5580
5581 if (orig != data)
5582 WREG32_PCIE(PCIE_CNTL2, data)rv370_pcie_wreg(rdev, (0x1c), (data));
5583}
5584
5585static void si_enable_hdp_mgcg(struct radeon_device *rdev,
5586 bool_Bool enable)
5587{
5588 u32 orig, data;
5589
5590 orig = data = RREG32(HDP_HOST_PATH_CNTL)r100_mm_rreg(rdev, (0x2C00), 0);
5591
5592 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG(1 << 16)))
5593 data &= ~CLOCK_GATING_DIS(1 << 23);
5594 else
5595 data |= CLOCK_GATING_DIS(1 << 23);
5596
5597 if (orig != data)
5598 WREG32(HDP_HOST_PATH_CNTL, data)r100_mm_wreg(rdev, (0x2C00), (data), 0);
5599}
5600
5601static void si_enable_hdp_ls(struct radeon_device *rdev,
5602 bool_Bool enable)
5603{
5604 u32 orig, data;
5605
5606 orig = data = RREG32(HDP_MEM_POWER_LS)r100_mm_rreg(rdev, (0x2F50), 0);
5607
5608 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS(1 << 15)))
5609 data |= HDP_LS_ENABLE(1 << 0);
5610 else
5611 data &= ~HDP_LS_ENABLE(1 << 0);
5612
5613 if (orig != data)
5614 WREG32(HDP_MEM_POWER_LS, data)r100_mm_wreg(rdev, (0x2F50), (data), 0);
5615}
5616
5617static void si_update_cg(struct radeon_device *rdev,
5618 u32 block, bool_Bool enable)
5619{
5620 if (block & RADEON_CG_BLOCK_GFX(1 << 0)) {
5621 si_enable_gui_idle_interrupt(rdev, false0);
5622 /* order matters! */
5623 if (enable) {
5624 si_enable_mgcg(rdev, true1);
5625 si_enable_cgcg(rdev, true1);
5626 } else {
5627 si_enable_cgcg(rdev, false0);
5628 si_enable_mgcg(rdev, false0);
5629 }
5630 si_enable_gui_idle_interrupt(rdev, true1);
5631 }
5632
5633 if (block & RADEON_CG_BLOCK_MC(1 << 1)) {
5634 si_enable_mc_mgcg(rdev, enable);
5635 si_enable_mc_ls(rdev, enable);
5636 }
5637
5638 if (block & RADEON_CG_BLOCK_SDMA(1 << 2)) {
5639 si_enable_dma_mgcg(rdev, enable);
5640 }
5641
5642 if (block & RADEON_CG_BLOCK_BIF(1 << 6)) {
5643 si_enable_bif_mgls(rdev, enable);
5644 }
5645
5646 if (block & RADEON_CG_BLOCK_UVD(1 << 3)) {
5647 if (rdev->has_uvd) {
5648 si_enable_uvd_mgcg(rdev, enable);
5649 }
5650 }
5651
5652 if (block & RADEON_CG_BLOCK_HDP(1 << 5)) {
5653 si_enable_hdp_mgcg(rdev, enable);
5654 si_enable_hdp_ls(rdev, enable);
5655 }
5656}
5657
5658static void si_init_cg(struct radeon_device *rdev)
5659{
5660 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX(1 << 0) |
5661 RADEON_CG_BLOCK_MC(1 << 1) |
5662 RADEON_CG_BLOCK_SDMA(1 << 2) |
5663 RADEON_CG_BLOCK_BIF(1 << 6) |
5664 RADEON_CG_BLOCK_HDP(1 << 5)), true1);
5665 if (rdev->has_uvd) {
5666 si_update_cg(rdev, RADEON_CG_BLOCK_UVD(1 << 3), true1);
5667 si_init_uvd_internal_cg(rdev);
5668 }
5669}
5670
5671static void si_fini_cg(struct radeon_device *rdev)
5672{
5673 if (rdev->has_uvd) {
5674 si_update_cg(rdev, RADEON_CG_BLOCK_UVD(1 << 3), false0);
5675 }
5676 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX(1 << 0) |
5677 RADEON_CG_BLOCK_MC(1 << 1) |
5678 RADEON_CG_BLOCK_SDMA(1 << 2) |
5679 RADEON_CG_BLOCK_BIF(1 << 6) |
5680 RADEON_CG_BLOCK_HDP(1 << 5)), false0);
5681}
5682
5683u32 si_get_csb_size(struct radeon_device *rdev)
5684{
5685 u32 count = 0;
5686 const struct cs_section_def *sect = NULL((void *)0);
5687 const struct cs_extent_def *ext = NULL((void *)0);
5688
5689 if (rdev->rlc.cs_data == NULL((void *)0))
5690 return 0;
5691
5692 /* begin clear state */
5693 count += 2;
5694 /* context control state */
5695 count += 3;
5696
5697 for (sect = rdev->rlc.cs_data; sect->section != NULL((void *)0); ++sect) {
5698 for (ext = sect->section; ext->extent != NULL((void *)0); ++ext) {
5699 if (sect->id == SECT_CONTEXT)
5700 count += 2 + ext->reg_count;
5701 else
5702 return 0;
5703 }
5704 }
5705 /* pa_sc_raster_config */
5706 count += 3;
5707 /* end clear state */
5708 count += 2;
5709 /* clear state */
5710 count += 2;
5711
5712 return count;
5713}
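/*
 * Put differently, the dword count computed above is
 *   2 (begin clear state) + 3 (context control)
 *   + (2 + reg_count) for each SECT_CONTEXT extent
 *   + 3 (pa_sc_raster_config) + 2 (end clear state) + 2 (clear state),
 * which matches exactly the number of dwords si_get_csb_buffer() emits below.
 */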
5714
5715void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5716{
5717 u32 count = 0, i;
5718 const struct cs_section_def *sect = NULL((void *)0);
5719 const struct cs_extent_def *ext = NULL((void *)0);
5720
5721 if (rdev->rlc.cs_data == NULL((void *)0))
5722 return;
5723 if (buffer == NULL((void *)0))
5724 return;
5725
5726 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0))((__uint32_t)(((3 << 30) | (((0x4A) & 0xFF) <<
8) | ((0) & 0x3FFF) << 16)))
;
5727 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE)((__uint32_t)((2 << 28)));
5728
5729 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1))((__uint32_t)(((3 << 30) | (((0x28) & 0xFF) <<
8) | ((1) & 0x3FFF) << 16)))
;
5730 buffer[count++] = cpu_to_le32(0x80000000)((__uint32_t)(0x80000000));
5731 buffer[count++] = cpu_to_le32(0x80000000)((__uint32_t)(0x80000000));
5732
5733 for (sect = rdev->rlc.cs_data; sect->section != NULL((void *)0); ++sect) {
5734 for (ext = sect->section; ext->extent != NULL((void *)0); ++ext) {
5735 if (sect->id == SECT_CONTEXT) {
5736 buffer[count++] =
5737 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count))((__uint32_t)(((3 << 30) | (((0x69) & 0xFF) <<
8) | ((ext->reg_count) & 0x3FFF) << 16)))
;
5738 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000)((__uint32_t)(ext->reg_index - 0xa000));
5739 for (i = 0; i < ext->reg_count; i++)
5740 buffer[count++] = cpu_to_le32(ext->extent[i])((__uint32_t)(ext->extent[i]));
5741 } else {
5742 return;
5743 }
5744 }
5745 }
5746
5747 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1))((__uint32_t)(((3 << 30) | (((0x69) & 0xFF) <<
8) | ((1) & 0x3FFF) << 16)))
;
5748 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START)((__uint32_t)(0x28350 - 0x00028000));
5749 switch (rdev->family) {
5750 case CHIP_TAHITI:
5751 case CHIP_PITCAIRN:
5752 buffer[count++] = cpu_to_le32(0x2a00126a)((__uint32_t)(0x2a00126a));
5753 break;
5754 case CHIP_VERDE:
5755 buffer[count++] = cpu_to_le32(0x0000124a)((__uint32_t)(0x0000124a));
5756 break;
5757 case CHIP_OLAND:
5758 buffer[count++] = cpu_to_le32(0x00000082)((__uint32_t)(0x00000082));
5759 break;
5760 case CHIP_HAINAN:
5761 buffer[count++] = cpu_to_le32(0x00000000)((__uint32_t)(0x00000000));
5762 break;
5763 default:
5764 buffer[count++] = cpu_to_le32(0x00000000)((__uint32_t)(0x00000000));
5765 break;
5766 }
5767
5768 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0))((__uint32_t)(((3 << 30) | (((0x4A) & 0xFF) <<
8) | ((0) & 0x3FFF) << 16)))
;
5769 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE)((__uint32_t)((3 << 28)));
5770
5771 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0))((__uint32_t)(((3 << 30) | (((0x12) & 0xFF) <<
8) | ((0) & 0x3FFF) << 16)))
;
5772 buffer[count++] = cpu_to_le32(0)((__uint32_t)(0));
5773}
5774
5775static void si_init_pg(struct radeon_device *rdev)
5776{
5777 if (rdev->pg_flags) {
5778 if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA(1 << 8)) {
5779 si_init_dma_pg(rdev);
5780 }
5781 si_init_ao_cu_mask(rdev);
5782 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG(1 << 0)) {
5783 si_init_gfx_cgpg(rdev);
5784 } else {
5785 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8)r100_mm_wreg(rdev, (0xC310), (rdev->rlc.save_restore_gpu_addr
>> 8), 0)
;
5786 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8)r100_mm_wreg(rdev, (0xC320), (rdev->rlc.clear_state_gpu_addr
>> 8), 0)
;
5787 }
5788 si_enable_dma_pg(rdev, true1);
5789 si_enable_gfx_cgpg(rdev, true1);
5790 } else {
5791 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8)r100_mm_wreg(rdev, (0xC310), (rdev->rlc.save_restore_gpu_addr
>> 8), 0)
;
5792 WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8)r100_mm_wreg(rdev, (0xC320), (rdev->rlc.clear_state_gpu_addr
>> 8), 0)
;
5793 }
5794}
5795
5796static void si_fini_pg(struct radeon_device *rdev)
5797{
5798 if (rdev->pg_flags) {
5799 si_enable_dma_pg(rdev, false0);
5800 si_enable_gfx_cgpg(rdev, false0);
5801 }
5802}
5803
5804/*
5805 * RLC
5806 */
5807void si_rlc_reset(struct radeon_device *rdev)
5808{
5809 u32 tmp = RREG32(GRBM_SOFT_RESET)r100_mm_rreg(rdev, (0x8020), 0);
5810
5811 tmp |= SOFT_RESET_RLC(1 << 2);
5812 WREG32(GRBM_SOFT_RESET, tmp)r100_mm_wreg(rdev, (0x8020), (tmp), 0);
5813 udelay(50);
5814 tmp &= ~SOFT_RESET_RLC(1 << 2);
5815 WREG32(GRBM_SOFT_RESET, tmp)r100_mm_wreg(rdev, (0x8020), (tmp), 0);
5816 udelay(50);
5817}
5818
5819static void si_rlc_stop(struct radeon_device *rdev)
5820{
5821 WREG32(RLC_CNTL, 0)r100_mm_wreg(rdev, (0xC300), (0), 0);
5822
5823 si_enable_gui_idle_interrupt(rdev, false0);
5824
5825 si_wait_for_rlc_serdes(rdev);
5826}
5827
5828static void si_rlc_start(struct radeon_device *rdev)
5829{
5830 WREG32(RLC_CNTL, RLC_ENABLE)r100_mm_wreg(rdev, (0xC300), ((1 << 0)), 0);
5831
5832 si_enable_gui_idle_interrupt(rdev, true1);
5833
5834 udelay(50);
5835}
5836
5837static bool_Bool si_lbpw_supported(struct radeon_device *rdev)
5838{
5839 u32 tmp;
5840
5841 /* Enable LBPW only for DDR3 */
5842 tmp = RREG32(MC_SEQ_MISC0)r100_mm_rreg(rdev, (0x2a00), 0);
5843 if ((tmp & 0xF0000000) == 0xB0000000)
5844 return true1;
5845 return false0;
5846}
5847
5848static void si_enable_lbpw(struct radeon_device *rdev, bool_Bool enable)
5849{
5850 u32 tmp;
5851
5852 tmp = RREG32(RLC_LB_CNTL)r100_mm_rreg(rdev, (0xC30C), 0);
5853 if (enable)
5854 tmp |= LOAD_BALANCE_ENABLE(1 << 0);
5855 else
5856 tmp &= ~LOAD_BALANCE_ENABLE(1 << 0);
5857 WREG32(RLC_LB_CNTL, tmp)r100_mm_wreg(rdev, (0xC30C), (tmp), 0);
5858
5859 if (!enable) {
5860 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5861 WREG32(SPI_LB_CU_MASK, 0x00ff)r100_mm_wreg(rdev, (0x9354), (0x00ff), 0);
5862 }
5863}
5864
5865static int si_rlc_resume(struct radeon_device *rdev)
5866{
5867 u32 i;
5868
5869 if (!rdev->rlc_fw)
5870 return -EINVAL22;
5871
5872 si_rlc_stop(rdev);
5873
5874 si_rlc_reset(rdev);
5875
5876 si_init_pg(rdev);
5877
5878 si_init_cg(rdev);
5879
5880 WREG32(RLC_RL_BASE, 0)r100_mm_wreg(rdev, (0xC304), (0), 0);
5881 WREG32(RLC_RL_SIZE, 0)r100_mm_wreg(rdev, (0xC308), (0), 0);
5882 WREG32(RLC_LB_CNTL, 0)r100_mm_wreg(rdev, (0xC30C), (0), 0);
5883 WREG32(RLC_LB_CNTR_MAX, 0xffffffff)r100_mm_wreg(rdev, (0xC314), (0xffffffff), 0);
5884 WREG32(RLC_LB_CNTR_INIT, 0)r100_mm_wreg(rdev, (0xC318), (0), 0);
5885 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff)r100_mm_wreg(rdev, (0xC41C), (0xffffffff), 0);
5886
5887 WREG32(RLC_MC_CNTL, 0)r100_mm_wreg(rdev, (0xC344), (0), 0);
5888 WREG32(RLC_UCODE_CNTL, 0)r100_mm_wreg(rdev, (0xC348), (0), 0);
5889
5890 if (rdev->new_fw) {
5891 const struct rlc_firmware_header_v1_0 *hdr =
5892 (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
5893 u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes)((__uint32_t)(hdr->header.ucode_size_bytes)) / 4;
5894 const __le32 *fw_data = (const __le32 *)
5895 (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)((__uint32_t)(hdr->header.ucode_array_offset_bytes)));
5896
5897 radeon_ucode_print_rlc_hdr(&hdr->header);
5898
5899 for (i = 0; i < fw_size; i++) {
5900 WREG32(RLC_UCODE_ADDR, i)r100_mm_wreg(rdev, (0xC32C), (i), 0);
5901 WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++))r100_mm_wreg(rdev, (0xC330), (((__uint32_t)(*(__uint32_t *)(fw_data
++)))), 0)
;
5902 }
5903 } else {
5904 const __be32 *fw_data =
5905 (const __be32 *)rdev->rlc_fw->data;
5906 for (i = 0; i < SI_RLC_UCODE_SIZE2048; i++) {
5907 WREG32(RLC_UCODE_ADDR, i)r100_mm_wreg(rdev, (0xC32C), (i), 0);
5908 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++))r100_mm_wreg(rdev, (0xC330), ((__uint32_t)(__builtin_constant_p
(*(__uint32_t *)(fw_data++)) ? (__uint32_t)(((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff) << 24 | ((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff00) << 8 | ((__uint32_t)(*(__uint32_t
*)(fw_data++)) & 0xff0000) >> 8 | ((__uint32_t)(*(
__uint32_t *)(fw_data++)) & 0xff000000) >> 24) : __swap32md
(*(__uint32_t *)(fw_data++)))), 0)
;
5909 }
5910 }
5911 WREG32(RLC_UCODE_ADDR, 0)r100_mm_wreg(rdev, (0xC32C), (0), 0);
5912
5913 si_enable_lbpw(rdev, si_lbpw_supported(rdev));
5914
5915 si_rlc_start(rdev);
5916
5917 return 0;
5918}
5919
5920static void si_enable_interrupts(struct radeon_device *rdev)
5921{
5922 u32 ih_cntl = RREG32(IH_CNTL)r100_mm_rreg(rdev, (0x3e18), 0);
5923 u32 ih_rb_cntl = RREG32(IH_RB_CNTL)r100_mm_rreg(rdev, (0x3e00), 0);
5924
5925 ih_cntl |= ENABLE_INTR(1 << 0);
5926 ih_rb_cntl |= IH_RB_ENABLE(1 << 0);
5927 WREG32(IH_CNTL, ih_cntl)r100_mm_wreg(rdev, (0x3e18), (ih_cntl), 0);
5928 WREG32(IH_RB_CNTL, ih_rb_cntl)r100_mm_wreg(rdev, (0x3e00), (ih_rb_cntl), 0);
5929 rdev->ih.enabled = true1;
5930}
5931
5932static void si_disable_interrupts(struct radeon_device *rdev)
5933{
5934 u32 ih_rb_cntl = RREG32(IH_RB_CNTL)r100_mm_rreg(rdev, (0x3e00), 0);
5935 u32 ih_cntl = RREG32(IH_CNTL)r100_mm_rreg(rdev, (0x3e18), 0);
5936
5937 ih_rb_cntl &= ~IH_RB_ENABLE(1 << 0);
5938 ih_cntl &= ~ENABLE_INTR(1 << 0);
5939 WREG32(IH_RB_CNTL, ih_rb_cntl)r100_mm_wreg(rdev, (0x3e00), (ih_rb_cntl), 0);
5940 WREG32(IH_CNTL, ih_cntl)r100_mm_wreg(rdev, (0x3e18), (ih_cntl), 0);
5941 /* set rptr, wptr to 0 */
5942 WREG32(IH_RB_RPTR, 0)r100_mm_wreg(rdev, (0x3e08), (0), 0);
5943 WREG32(IH_RB_WPTR, 0)r100_mm_wreg(rdev, (0x3e0c), (0), 0);
5944 rdev->ih.enabled = false0;
5945 rdev->ih.rptr = 0;
5946}
5947
5948static void si_disable_interrupt_state(struct radeon_device *rdev)
5949{
5950 int i;
5951 u32 tmp;
5952
5953 tmp = RREG32(CP_INT_CNTL_RING0)r100_mm_rreg(rdev, (0xC1A8), 0) &
5954 (CNTX_BUSY_INT_ENABLE(1 << 19) | CNTX_EMPTY_INT_ENABLE(1 << 20));
5955 WREG32(CP_INT_CNTL_RING0, tmp)r100_mm_wreg(rdev, (0xC1A8), (tmp), 0);
5956 WREG32(CP_INT_CNTL_RING1, 0)r100_mm_wreg(rdev, (0xC1AC), (0), 0);
5957 WREG32(CP_INT_CNTL_RING2, 0)r100_mm_wreg(rdev, (0xC1B0), (0), 0);
5958 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET)r100_mm_rreg(rdev, (0xd02c + 0x0), 0) & ~TRAP_ENABLE(1 << 0);
5959 WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp)r100_mm_wreg(rdev, (0xd02c + 0x0), (tmp), 0);
5960 tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET)r100_mm_rreg(rdev, (0xd02c + 0x800), 0) & ~TRAP_ENABLE(1 << 0);
5961 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp)r100_mm_wreg(rdev, (0xd02c + 0x800), (tmp), 0);
5962 WREG32(GRBM_INT_CNTL, 0)r100_mm_wreg(rdev, (0x8060), (0), 0);
5963 WREG32(SRBM_INT_CNTL, 0)r100_mm_wreg(rdev, (0xEA0), (0), 0);
5964 for (i = 0; i < rdev->num_crtc; i++)
5965 WREG32(INT_MASK + crtc_offsets[i], 0)r100_mm_wreg(rdev, (0x6b40 + crtc_offsets[i]), (0), 0);
5966 for (i = 0; i < rdev->num_crtc; i++)
5967 WREG32(GRPH_INT_CONTROL + crtc_offsets[i], 0)r100_mm_wreg(rdev, (0x685c + crtc_offsets[i]), (0), 0);
5968
5969 if (!ASIC_IS_NODCE(rdev)((rdev->family == CHIP_HAINAN))) {
5970 WREG32(DAC_AUTODETECT_INT_CONTROL, 0)r100_mm_wreg(rdev, (0x67c8), (0), 0);
5971
5972 for (i = 0; i < 6; i++)
5973 WREG32_AND(DC_HPDx_INT_CONTROL(i),do { uint32_t tmp_ = r100_mm_rreg(rdev, ((0x6020 + (i * 0xc))
), 0); tmp_ &= ((1 << 8)); tmp_ |= ((0) & ~((1 <<
8))); r100_mm_wreg(rdev, ((0x6020 + (i * 0xc))), (tmp_), 0);
} while (0)
5974 DC_HPDx_INT_POLARITY)do { uint32_t tmp_ = r100_mm_rreg(rdev, ((0x6020 + (i * 0xc))
), 0); tmp_ &= ((1 << 8)); tmp_ |= ((0) & ~((1 <<
8))); r100_mm_wreg(rdev, ((0x6020 + (i * 0xc))), (tmp_), 0);
} while (0)
;
5975 }
5976}
5977
5978static int si_irq_init(struct radeon_device *rdev)
5979{
5980 int ret = 0;
5981 int rb_bufsz;
5982 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
5983
5984 /* allocate ring */
5985 ret = r600_ih_ring_alloc(rdev);
5986 if (ret)
5987 return ret;
5988
5989 /* disable irqs */
5990 si_disable_interrupts(rdev);
5991
5992 /* init rlc */
5993 ret = si_rlc_resume(rdev);
5994 if (ret) {
5995 r600_ih_ring_fini(rdev);
5996 return ret;
5997 }
5998
5999 /* setup interrupt control */
6000 /* set dummy read address to dummy page address */
6001 WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8)r100_mm_wreg(rdev, (0x546c), (rdev->dummy_page.addr >>
8), 0)
;
6002 interrupt_cntl = RREG32(INTERRUPT_CNTL)r100_mm_rreg(rdev, (0x5468), 0);
6003 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
6004 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
6005 */
6006 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE(1 << 0);
6007 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
6008 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN(1 << 3);
6009 WREG32(INTERRUPT_CNTL, interrupt_cntl)r100_mm_wreg(rdev, (0x5468), (interrupt_cntl), 0);
6010
6011 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8)r100_mm_wreg(rdev, (0x3e04), (rdev->ih.gpu_addr >> 8
), 0)
;
6012 rb_bufsz = order_base_2(rdev->ih.ring_size / 4)drm_order(rdev->ih.ring_size / 4);
6013
6014 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE(1 << 16) |
6015 IH_WPTR_OVERFLOW_CLEAR(1 << 31) |
6016 (rb_bufsz << 1));
6017
6018 if (rdev->wb.enabled)
6019 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE(1 << 8);
6020
6021 /* set the writeback address whether it's enabled or not */
6022 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC)r100_mm_wreg(rdev, (0x3e14), ((rdev->wb.gpu_addr + 2048) &
0xFFFFFFFC), 0)
;
6023 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF)r100_mm_wreg(rdev, (0x3e10), (((u32)(((rdev->wb.gpu_addr +
2048) >> 16) >> 16)) & 0xFF), 0)
;
6024
6025 WREG32(IH_RB_CNTL, ih_rb_cntl)r100_mm_wreg(rdev, (0x3e00), (ih_rb_cntl), 0);
6026
6027 /* set rptr, wptr to 0 */
6028 WREG32(IH_RB_RPTR, 0)r100_mm_wreg(rdev, (0x3e08), (0), 0);
6029 WREG32(IH_RB_WPTR, 0)r100_mm_wreg(rdev, (0x3e0c), (0), 0);
6030
6031 /* Default settings for IH_CNTL (disabled at first) */
6032 ih_cntl = MC_WRREQ_CREDIT(0x10)((0x10) << 15) | MC_WR_CLEAN_CNT(0x10)((0x10) << 20) | MC_VMID(0)((0) << 25);
6033 /* RPTR_REARM only works if msi's are enabled */
6034 if (rdev->msi_enabled)
6035 ih_cntl |= RPTR_REARM(1 << 4);
6036 WREG32(IH_CNTL, ih_cntl)r100_mm_wreg(rdev, (0x3e18), (ih_cntl), 0);
6037
6038 /* force the active interrupt state to all disabled */
6039 si_disable_interrupt_state(rdev);
6040
6041 pci_set_master(rdev->pdev);
6042
6043 /* enable irqs */
6044 si_enable_interrupts(rdev);
6045
6046 return ret;
6047}
6048
6049/* The order we write back each register here is important */
6050int si_irq_set(struct radeon_device *rdev)
6051{
6052 int i;
6053 u32 cp_int_cntl;
6054 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
6055 u32 grbm_int_cntl = 0;
6056 u32 dma_cntl, dma_cntl1;
6057 u32 thermal_int = 0;
6058
6059 if (!rdev->irq.installed) {
6060 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n")({ int __ret = !!(1); if (__ret) printf("Can't enable IRQ/MSI because no handler is installed\n"
); __builtin_expect(!!(__ret), 0); })
;
6061 return -EINVAL22;
6062 }
6063 /* don't enable anything if the ih is disabled */
6064 if (!rdev->ih.enabled) {
6065 si_disable_interrupts(rdev);
6066 /* force the active interrupt state to all disabled */
6067 si_disable_interrupt_state(rdev);
6068 return 0;
6069 }
6070
6071 cp_int_cntl = RREG32(CP_INT_CNTL_RING0)r100_mm_rreg(rdev, (0xC1A8), 0) &
6072 (CNTX_BUSY_INT_ENABLE(1 << 19) | CNTX_EMPTY_INT_ENABLE(1 << 20));
6073
6074 dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET)r100_mm_rreg(rdev, (0xd02c + 0x0), 0) & ~TRAP_ENABLE(1 << 0);
6075 dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET)r100_mm_rreg(rdev, (0xd02c + 0x800), 0) & ~TRAP_ENABLE(1 << 0);
6076
6077 thermal_int = RREG32(CG_THERMAL_INT)r100_mm_rreg(rdev, (0x708), 0) &
6078 ~(THERM_INT_MASK_HIGH(1 << 24) | THERM_INT_MASK_LOW(1 << 25));
6079
6080 /* enable CP interrupts on all rings */
6081 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])({ typeof(*(&rdev->irq.ring_int[0])) __tmp = *(volatile
typeof(*(&rdev->irq.ring_int[0])) *)&(*(&rdev
->irq.ring_int[0])); membar_datadep_consumer(); __tmp; })
) {
6082 DRM_DEBUG("si_irq_set: sw int gfx\n")__drm_dbg(DRM_UT_CORE, "si_irq_set: sw int gfx\n");
6083 cp_int_cntl |= TIME_STAMP_INT_ENABLE(1 << 26);
6084 }
6085 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])({ typeof(*(&rdev->irq.ring_int[1])) __tmp = *(volatile
typeof(*(&rdev->irq.ring_int[1])) *)&(*(&rdev
->irq.ring_int[1])); membar_datadep_consumer(); __tmp; })
) {
6086 DRM_DEBUG("si_irq_set: sw int cp1\n")__drm_dbg(DRM_UT_CORE, "si_irq_set: sw int cp1\n");
6087 cp_int_cntl1 |= TIME_STAMP_INT_ENABLE(1 << 26);
6088 }
6089 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])({ typeof(*(&rdev->irq.ring_int[2])) __tmp = *(volatile
typeof(*(&rdev->irq.ring_int[2])) *)&(*(&rdev
->irq.ring_int[2])); membar_datadep_consumer(); __tmp; })
) {
6090 DRM_DEBUG("si_irq_set: sw int cp2\n")__drm_dbg(DRM_UT_CORE, "si_irq_set: sw int cp2\n");
6091 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE(1 << 26);
6092 }
6093 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])({ typeof(*(&rdev->irq.ring_int[3])) __tmp = *(volatile
typeof(*(&rdev->irq.ring_int[3])) *)&(*(&rdev
->irq.ring_int[3])); membar_datadep_consumer(); __tmp; })
) {
6094 DRM_DEBUG("si_irq_set: sw int dma\n")__drm_dbg(DRM_UT_CORE, "si_irq_set: sw int dma\n");
6095 dma_cntl |= TRAP_ENABLE(1 << 0);
6096 }
6097
6098 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])({ typeof(*(&rdev->irq.ring_int[4])) __tmp = *(volatile
typeof(*(&rdev->irq.ring_int[4])) *)&(*(&rdev
->irq.ring_int[4])); membar_datadep_consumer(); __tmp; })
) {
6099 DRM_DEBUG("si_irq_set: sw int dma1\n")__drm_dbg(DRM_UT_CORE, "si_irq_set: sw int dma1\n");
6100 dma_cntl1 |= TRAP_ENABLE(1 << 0);
6101 }
6102
6103 WREG32(CP_INT_CNTL_RING0, cp_int_cntl)r100_mm_wreg(rdev, (0xC1A8), (cp_int_cntl), 0);
6104 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1)r100_mm_wreg(rdev, (0xC1AC), (cp_int_cntl1), 0);
6105 WREG32(CP_INT_CNTL_RING2, cp_int_cntl2)r100_mm_wreg(rdev, (0xC1B0), (cp_int_cntl2), 0);
6106
6107 WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl)r100_mm_wreg(rdev, (0xd02c + 0x0), (dma_cntl), 0);
6108 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1)r100_mm_wreg(rdev, (0xd02c + 0x800), (dma_cntl1), 0);
6109
6110 WREG32(GRBM_INT_CNTL, grbm_int_cntl)r100_mm_wreg(rdev, (0x8060), (grbm_int_cntl), 0);
6111
6112 if (rdev->irq.dpm_thermal) {
6113 DRM_DEBUG("dpm thermal\n")__drm_dbg(DRM_UT_CORE, "dpm thermal\n");
6114 thermal_int |= THERM_INT_MASK_HIGH(1 << 24) | THERM_INT_MASK_LOW(1 << 25);
6115 }
6116
6117 for (i = 0; i < rdev->num_crtc; i++) {
6118 radeon_irq_kms_set_irq_n_enabled(
6119 rdev, INT_MASK0x6b40 + crtc_offsets[i], VBLANK_INT_MASK(1 << 0),
6120 rdev->irq.crtc_vblank_int[i] ||
6121 atomic_read(&rdev->irq.pflip[i])({ typeof(*(&rdev->irq.pflip[i])) __tmp = *(volatile typeof
(*(&rdev->irq.pflip[i])) *)&(*(&rdev->irq.pflip
[i])); membar_datadep_consumer(); __tmp; })
, "vblank", i);
6122 }
6123
6124 for (i = 0; i < rdev->num_crtc; i++)
6125 WREG32(GRPH_INT_CONTROL + crtc_offsets[i], GRPH_PFLIP_INT_MASK)r100_mm_wreg(rdev, (0x685c + crtc_offsets[i]), ((1 << 0
)), 0)
;
6126
6127 if (!ASIC_IS_NODCE(rdev)((rdev->family == CHIP_HAINAN))) {
6128 for (i = 0; i < 6; i++) {
6129 radeon_irq_kms_set_irq_n_enabled(
6130 rdev, DC_HPDx_INT_CONTROL(i)(0x6020 + (i * 0xc)),
6131 DC_HPDx_INT_EN(1 << 16) | DC_HPDx_RX_INT_EN(1 << 24),
6132 rdev->irq.hpd[i], "HPD", i);
6133 }
6134 }
6135
6136 WREG32(CG_THERMAL_INT, thermal_int)r100_mm_wreg(rdev, (0x708), (thermal_int), 0);
6137
6138 /* posting read */
6139 RREG32(SRBM_STATUS)r100_mm_rreg(rdev, (0xE50), 0);
6140
6141 return 0;
6142}
6143
6144/* The order we write back each register here is important */
6145static inline void si_irq_ack(struct radeon_device *rdev)
6146{
6147 int i, j;
6148 u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
6149 u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int;
6150
6151 if (ASIC_IS_NODCE(rdev)((rdev->family == CHIP_HAINAN)))
6152 return;
6153
6154 for (i = 0; i < 6; i++) {
6155 disp_int[i] = RREG32(si_disp_int_status[i])r100_mm_rreg(rdev, (si_disp_int_status[i]), 0);
6156 if (i < rdev->num_crtc)
6157 grph_int[i] = RREG32(GRPH_INT_STATUS + crtc_offsets[i])r100_mm_rreg(rdev, (0x6858 + crtc_offsets[i]), 0);
6158 }
6159
6160 /* We write back the interrupt registers in pairs */
6161 for (i = 0; i < rdev->num_crtc; i += 2) {
6162 for (j = i; j < (i + 2); j++) {
6163 if (grph_int[j] & GRPH_PFLIP_INT_OCCURRED(1 << 0))
6164 WREG32(GRPH_INT_STATUS + crtc_offsets[j],r100_mm_wreg(rdev, (0x6858 + crtc_offsets[j]), ((1 << 8
)), 0)
6165 GRPH_PFLIP_INT_CLEAR)r100_mm_wreg(rdev, (0x6858 + crtc_offsets[j]), ((1 << 8
)), 0)
;
6166 }
6167
6168 for (j = i; j < (i + 2); j++) {
6169 if (disp_int[j] & LB_D1_VBLANK_INTERRUPT(1 << 3))
6170 WREG32(VBLANK_STATUS + crtc_offsets[j],r100_mm_wreg(rdev, (0x6bbc + crtc_offsets[j]), ((1 << 4
)), 0)
6171 VBLANK_ACK)r100_mm_wreg(rdev, (0x6bbc + crtc_offsets[j]), ((1 << 4
)), 0)
;
6172 if (disp_int[j] & LB_D1_VLINE_INTERRUPT(1 << 2))
6173 WREG32(VLINE_STATUS + crtc_offsets[j],r100_mm_wreg(rdev, (0x6bb8 + crtc_offsets[j]), ((1 << 4
)), 0)
6174 VLINE_ACK)r100_mm_wreg(rdev, (0x6bb8 + crtc_offsets[j]), ((1 << 4
)), 0)
;
6175 }
6176 }
6177
6178 for (i = 0; i < 6; i++) {
6179 if (disp_int[i] & DC_HPD1_INTERRUPT(1 << 17))
6180 WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_ACK)do { uint32_t tmp_ = r100_mm_rreg(rdev, ((0x6020 + (i * 0xc))
), 0); tmp_ &= (~((1 << 0))); tmp_ |= (((1 <<
0)) & ~(~((1 << 0)))); r100_mm_wreg(rdev, ((0x6020
+ (i * 0xc))), (tmp_), 0); } while (0)
;
6181 }
6182
6183 for (i = 0; i < 6; i++) {
6184 if (disp_int[i] & DC_HPD1_RX_INTERRUPT(1 << 18))
6185 WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_RX_INT_ACK)do { uint32_t tmp_ = r100_mm_rreg(rdev, ((0x6020 + (i * 0xc))
), 0); tmp_ &= (~((1 << 20))); tmp_ |= (((1 <<
20)) & ~(~((1 << 20)))); r100_mm_wreg(rdev, ((0x6020
+ (i * 0xc))), (tmp_), 0); } while (0)
;
6186 }
6187}
6188
6189static void si_irq_disable(struct radeon_device *rdev)
6190{
6191 si_disable_interrupts(rdev);
6192 /* Wait and acknowledge irq */
6193 mdelay(1);
6194 si_irq_ack(rdev);
6195 si_disable_interrupt_state(rdev);
6196}
6197
6198static void si_irq_suspend(struct radeon_device *rdev)
6199{
6200 si_irq_disable(rdev);
6201 si_rlc_stop(rdev);
6202}
6203
6204static void si_irq_fini(struct radeon_device *rdev)
6205{
6206 si_irq_suspend(rdev);
6207 r600_ih_ring_fini(rdev);
6208}
6209
6210static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
6211{
6212 u32 wptr, tmp;
6213
6214 if (rdev->wb.enabled)
6215 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4])((__uint32_t)(rdev->wb.wb[2048/4]));
6216 else
6217 wptr = RREG32(IH_RB_WPTR)r100_mm_rreg(rdev, (0x3e0c), 0);
6218
6219 if (wptr & RB_OVERFLOW(1 << 0)) {
6220 wptr &= ~RB_OVERFLOW(1 << 0);
6221 /* When a ring buffer overflow happens, start parsing interrupts
6222 * from the last vector that was not overwritten (wptr + 16). Hopefully
6223 * this should allow us to catch up.
6224 */
6225 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",printf("drm:pid%d:%s *WARNING* " "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , wptr, rdev
->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask)
6226 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask)printf("drm:pid%d:%s *WARNING* " "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , wptr, rdev
->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask)
;
6227 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
6228 tmp = RREG32(IH_RB_CNTL)r100_mm_rreg(rdev, (0x3e00), 0);
6229 tmp |= IH_WPTR_OVERFLOW_CLEAR(1 << 31);
6230 WREG32(IH_RB_CNTL, tmp)r100_mm_wreg(rdev, (0x3e00), (tmp), 0);
6231 }
6232 return (wptr & rdev->ih.ptr_mask);
6233}
6234
6235/* SI IV Ring
6236 * Each IV ring entry is 128 bits:
6237 * [7:0] - interrupt source id
6238 * [31:8] - reserved
6239 * [59:32] - interrupt source data
6240 * [63:60] - reserved
6241 * [71:64] - RINGID
6242 * [79:72] - VMID
6243 * [127:80] - reserved
6244 */
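/*
 * A minimal decoding sketch for the IV ring layout described above. The
 * struct and helper names (si_iv_entry, si_decode_iv_entry) are illustrative
 * only and do not exist in the driver; the masks mirror the ones applied to
 * rdev->ih.ring[] in si_irq_process() below, which decodes the same fields
 * inline.
 */
struct si_iv_entry {
	u32 src_id;	/* bits [7:0] of the entry (dword 0) */
	u32 src_data;	/* bits [59:32] of the entry (dword 1) */
	u32 ring_id;	/* bits [71:64] of the entry (dword 2, low byte) */
	u32 vm_id;	/* bits [79:72] of the entry (dword 2, second byte) */
};

static inline void si_decode_iv_entry(const volatile u32 *ring, u32 rptr,
				      struct si_iv_entry *e)
{
	/* rptr is in bytes; each 128-bit entry spans four dwords */
	u32 ring_index = rptr / 4;

	e->src_id   = le32_to_cpu(ring[ring_index + 0]) & 0xff;
	e->src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff;
	e->ring_id  = le32_to_cpu(ring[ring_index + 2]) & 0xff;
	e->vm_id    = (le32_to_cpu(ring[ring_index + 2]) >> 8) & 0xff;
}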
6245int si_irq_process(struct radeon_device *rdev)
6246{
6247 u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
6248 u32 crtc_idx, hpd_idx;
6249 u32 mask;
6250 u32 wptr;
6251 u32 rptr;
6252 u32 src_id, src_data, ring_id;
6253 u32 ring_index;
6254 bool_Bool queue_hotplug = false0;
6255 bool_Bool queue_dp = false0;
6256 bool_Bool queue_thermal = false0;
6257 u32 status, addr;
6258 const char *event_name;
6259
6260 if (!rdev->ih.enabled || rdev->shutdown)
6261 return IRQ_NONE;
6262
6263 wptr = si_get_ih_wptr(rdev);
6264
6265 if (wptr == rdev->ih.rptr)
6266 return IRQ_NONE;
6267restart_ih:
6268 /* is somebody else already processing irqs? */
6269 if (atomic_xchg(&rdev->ih.lock, 1))
6270 return IRQ_NONE;
6271
6272 rptr = rdev->ih.rptr;
6273 DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr)__drm_dbg(DRM_UT_CORE, "si_irq_process start: rptr %d, wptr %d\n"
, rptr, wptr)
;
6274
6275 /* Order reading of wptr vs. reading of IH ring data */
6276 rmb()do { __asm volatile("lfence" ::: "memory"); } while (0);
6277
6278 /* display interrupts */
6279 si_irq_ack(rdev);
6280
6281 while (rptr != wptr) {
6282 /* wptr/rptr are in bytes! */
6283 ring_index = rptr / 4;
6284 src_id = le32_to_cpu(rdev->ih.ring[ring_index])((__uint32_t)(rdev->ih.ring[ring_index])) & 0xff;
6285 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1])((__uint32_t)(rdev->ih.ring[ring_index + 1])) & 0xfffffff;
6286 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2])((__uint32_t)(rdev->ih.ring[ring_index + 2])) & 0xff;
6287
6288 switch (src_id) {
6289 case 1: /* D1 vblank/vline */
6290 case 2: /* D2 vblank/vline */
6291 case 3: /* D3 vblank/vline */
6292 case 4: /* D4 vblank/vline */
6293 case 5: /* D5 vblank/vline */
6294 case 6: /* D6 vblank/vline */
6295 crtc_idx = src_id - 1;
6296
6297 if (src_data == 0) { /* vblank */
6298 mask = LB_D1_VBLANK_INTERRUPT(1 << 3);
6299 event_name = "vblank";
6300
6301 if (rdev->irq.crtc_vblank_int[crtc_idx]) {
6302 drm_handle_vblank(rdev->ddev, crtc_idx);
6303 rdev->pm.vblank_sync = true1;
6304 wake_up(&rdev->irq.vblank_queue);
6305 }
6306 if (atomic_read(&rdev->irq.pflip[crtc_idx])({ typeof(*(&rdev->irq.pflip[crtc_idx])) __tmp = *(volatile
typeof(*(&rdev->irq.pflip[crtc_idx])) *)&(*(&
rdev->irq.pflip[crtc_idx])); membar_datadep_consumer(); __tmp
; })
) {
6307 radeon_crtc_handle_vblank(rdev,
6308 crtc_idx);
6309 }
6310
6311 } else if (src_data == 1) { /* vline */
6312 mask = LB_D1_VLINE_INTERRUPT(1 << 2);
6313 event_name = "vline";
6314 } else {
6315 DRM_DEBUG("Unhandled interrupt: %d %d\n",__drm_dbg(DRM_UT_CORE, "Unhandled interrupt: %d %d\n", src_id
, src_data)
6316 src_id, src_data)__drm_dbg(DRM_UT_CORE, "Unhandled interrupt: %d %d\n", src_id
, src_data)
;
6317 break;
6318 }
6319
6320 if (!(disp_int[crtc_idx] & mask)) {
6321 DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n",__drm_dbg(DRM_UT_CORE, "IH: D%d %s - IH event w/o asserted irq bit?\n"
, crtc_idx + 1, event_name)
6322 crtc_idx + 1, event_name)__drm_dbg(DRM_UT_CORE, "IH: D%d %s - IH event w/o asserted irq bit?\n"
, crtc_idx + 1, event_name)
;
6323 }
6324
6325 disp_int[crtc_idx] &= ~mask;
6326 DRM_DEBUG("IH: D%d %s\n", crtc_idx + 1, event_name)__drm_dbg(DRM_UT_CORE, "IH: D%d %s\n", crtc_idx + 1, event_name
)
;
6327
6328 break;
6329 case 8: /* D1 page flip */
6330 case 10: /* D2 page flip */
6331 case 12: /* D3 page flip */
6332 case 14: /* D4 page flip */
6333 case 16: /* D5 page flip */
6334 case 18: /* D6 page flip */
6335 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1)__drm_dbg(DRM_UT_CORE, "IH: D%d flip\n", ((src_id - 8) >>
1) + 1)
;
6336 if (radeon_use_pflipirq > 0)
6337 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
6338 break;
6339 case 42: /* HPD hotplug */
6340 if (src_data <= 5) {
6341 hpd_idx = src_data;
6342 mask = DC_HPD1_INTERRUPT(1 << 17);
6343 queue_hotplug = true1;
6344 event_name = "HPD";
6345
6346 } else if (src_data <= 11) {
6347 hpd_idx = src_data - 6;
6348 mask = DC_HPD1_RX_INTERRUPT(1 << 18);
6349 queue_dp = true1;
6350 event_name = "HPD_RX";
6351
6352 } else {
6353 DRM_DEBUG("Unhandled interrupt: %d %d\n",__drm_dbg(DRM_UT_CORE, "Unhandled interrupt: %d %d\n", src_id
, src_data)
6354 src_id, src_data)__drm_dbg(DRM_UT_CORE, "Unhandled interrupt: %d %d\n", src_id
, src_data)
;
6355 break;
6356 }
6357
6358 if (!(disp_int[hpd_idx] & mask))
6359 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n")__drm_dbg(DRM_UT_CORE, "IH: IH event w/o asserted irq bit?\n"
)
;
6360
6361 disp_int[hpd_idx] &= ~mask;
6362 DRM_DEBUG("IH: %s%d\n", event_name, hpd_idx + 1)__drm_dbg(DRM_UT_CORE, "IH: %s%d\n", event_name, hpd_idx + 1);
6363 break;
6364 case 96:
6365 DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR))__drm_err("SRBM_READ_ERROR: 0x%x\n", r100_mm_rreg(rdev, (0xE98
), 0))
;
6366 WREG32(SRBM_INT_ACK, 0x1)r100_mm_wreg(rdev, (0xEA8), (0x1), 0);
6367 break;
6368 case 124: /* UVD */
6369 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data)__drm_dbg(DRM_UT_CORE, "IH: UVD int: 0x%08x\n", src_data);
6370 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX5);
6371 break;
6372 case 146:
6373 case 147:
6374 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)r100_mm_rreg(rdev, (0x14FC), 0);
6375 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)r100_mm_rreg(rdev, (0x14DC), 0);
6376 /* reset addr and status */
6377 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x1434), 0); tmp_ &=
(~1); tmp_ |= ((1) & ~(~1)); r100_mm_wreg(rdev, (0x1434)
, (tmp_), 0); } while (0)
;
6378 if (addr == 0x0 && status == 0x0)
6379 break;
6380 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data)printf("drm:pid%d:%s *ERROR* " "GPU fault detected: %d 0x%08x\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , src_id
, src_data)
;
6381 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",printf("drm:pid%d:%s *ERROR* " " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , addr)
6382 addr)printf("drm:pid%d:%s *ERROR* " " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , addr)
;
6383 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",printf("drm:pid%d:%s *ERROR* " " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , status
)
6384 status)printf("drm:pid%d:%s *ERROR* " " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , status
)
;
6385 si_vm_decode_fault(rdev, status, addr);
6386 break;
6387 case 176: /* RINGID0 CP_INT */
6388 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX0);
6389 break;
6390 case 177: /* RINGID1 CP_INT */
6391 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX1);
6392 break;
6393 case 178: /* RINGID2 CP_INT */
6394 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX2);
6395 break;
6396 case 181: /* CP EOP event */
6397 DRM_DEBUG("IH: CP EOP\n")__drm_dbg(DRM_UT_CORE, "IH: CP EOP\n");
6398 switch (ring_id) {
6399 case 0:
6400 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX0);
6401 break;
6402 case 1:
6403 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX1);
6404 break;
6405 case 2:
6406 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX2);
6407 break;
6408 }
6409 break;
6410 case 224: /* DMA trap event */
6411 DRM_DEBUG("IH: DMA trap\n")__drm_dbg(DRM_UT_CORE, "IH: DMA trap\n");
6412 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX3);
6413 break;
6414 case 230: /* thermal low to high */
6415 DRM_DEBUG("IH: thermal low to high\n")__drm_dbg(DRM_UT_CORE, "IH: thermal low to high\n");
6416 rdev->pm.dpm.thermal.high_to_low = false0;
6417 queue_thermal = true1;
6418 break;
6419 case 231: /* thermal high to low */
6420 DRM_DEBUG("IH: thermal high to low\n")__drm_dbg(DRM_UT_CORE, "IH: thermal high to low\n");
6421 rdev->pm.dpm.thermal.high_to_low = true1;
6422 queue_thermal = true1;
6423 break;
6424 case 233: /* GUI IDLE */
6425 DRM_DEBUG("IH: GUI idle\n")__drm_dbg(DRM_UT_CORE, "IH: GUI idle\n");
6426 break;
6427 case 244: /* DMA trap event */
6428 DRM_DEBUG("IH: DMA1 trap\n")__drm_dbg(DRM_UT_CORE, "IH: DMA1 trap\n");
6429 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX4);
6430 break;
6431 default:
6432 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data)__drm_dbg(DRM_UT_CORE, "Unhandled interrupt: %d %d\n", src_id
, src_data)
;
6433 break;
6434 }
6435
6436 /* wptr/rptr are in bytes! */
6437 rptr += 16;
6438 rptr &= rdev->ih.ptr_mask;
6439 WREG32(IH_RB_RPTR, rptr)r100_mm_wreg(rdev, (0x3e08), (rptr), 0);
6440 }
6441 if (queue_dp)
6442 schedule_work(&rdev->dp_work);
6443 if (queue_hotplug)
6444 schedule_delayed_work(&rdev->hotplug_work, 0);
6445 if (queue_thermal && rdev->pm.dpm_enabled)
6446 schedule_work(&rdev->pm.dpm.thermal.work);
6447 rdev->ih.rptr = rptr;
6448 atomic_set(&rdev->ih.lock, 0)({ typeof(*(&rdev->ih.lock)) __tmp = ((0)); *(volatile
typeof(*(&rdev->ih.lock)) *)&(*(&rdev->ih.
lock)) = __tmp; __tmp; })
;
6449
6450 /* make sure wptr hasn't changed while processing */
6451 wptr = si_get_ih_wptr(rdev);
6452 if (wptr != rptr)
6453 goto restart_ih;
6454
6455 return IRQ_HANDLED;
6456}
6457
6458/*
6459 * startup/shutdown callbacks
6460 */
6461static void si_uvd_init(struct radeon_device *rdev)
6462{
6463 int r;
6464
6465 if (!rdev->has_uvd)
6466 return;
6467
6468 r = radeon_uvd_init(rdev);
6469 if (r) {
6470 dev_err(rdev->dev, "failed UVD (%d) init.\n", r)printf("drm:pid%d:%s *ERROR* " "failed UVD (%d) init.\n", ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6471 /*
6472 * At this point rdev->uvd.vcpu_bo is NULL, which makes
6473 * uvd_v2_2_resume() fail early, so nothing happens there.
6474 * It is therefore pointless to go through that code, which
6475 * is why we disable uvd here.
6476 */
6477 rdev->has_uvd = false0;
6478 return;
6479 }
6480 rdev->ring[R600_RING_TYPE_UVD_INDEX5].ring_obj = NULL((void *)0);
6481 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX5], 4096);
6482}
6483
6484static void si_uvd_start(struct radeon_device *rdev)
6485{
6486 int r;
6487
6488 if (!rdev->has_uvd)
6489 return;
6490
6491 r = uvd_v2_2_resume(rdev);
6492 if (r) {
6493 dev_err(rdev->dev, "failed UVD resume (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed UVD resume (%d).\n", (
{struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6494 goto error;
6495 }
6496 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX5);
6497 if (r) {
6498 dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing UVD fences (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6499 goto error;
6500 }
6501 return;
6502
6503error:
6504 rdev->ring[R600_RING_TYPE_UVD_INDEX5].ring_size = 0;
6505}
6506
6507static void si_uvd_resume(struct radeon_device *rdev)
6508{
6509 struct radeon_ring *ring;
6510 int r;
6511
6512 if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX5].ring_size)
6513 return;
6514
6515 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX5];
6516 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)((0 << 30) | (((0xEFFC) >> 2) & 0xFFFF) | ((0
) & 0x3FFF) << 16)
);
6517 if (r) {
6518 dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing UVD ring (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6519 return;
6520 }
6521 r = uvd_v1_0_init(rdev);
6522 if (r) {
6523 dev_err(rdev->dev, "failed initializing UVD (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing UVD (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6524 return;
6525 }
6526}
6527
6528static void si_vce_init(struct radeon_device *rdev)
6529{
6530 int r;
6531
6532 if (!rdev->has_vce)
6533 return;
6534
6535 r = radeon_vce_init(rdev);
6536 if (r) {
6537 dev_err(rdev->dev, "failed VCE (%d) init.\n", r)printf("drm:pid%d:%s *ERROR* " "failed VCE (%d) init.\n", ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6538 /*
6539 * At this point rdev->vce.vcpu_bo is NULL, which makes
6540 * si_vce_start() fail early, so nothing happens there.
6541 * It is therefore pointless to go through that code, which
6542 * is why we disable vce here.
6543 */
6544 rdev->has_vce = false0;
6545 return;
6546 }
6547 rdev->ring[TN_RING_TYPE_VCE1_INDEX6].ring_obj = NULL((void *)0);
6548 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX6], 4096);
6549 rdev->ring[TN_RING_TYPE_VCE2_INDEX7].ring_obj = NULL((void *)0);
6550 r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX7], 4096);
6551}
6552
6553static void si_vce_start(struct radeon_device *rdev)
6554{
6555 int r;
6556
6557 if (!rdev->has_vce)
6558 return;
6559
6560 r = radeon_vce_resume(rdev);
6561 if (r) {
6562 dev_err(rdev->dev, "failed VCE resume (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed VCE resume (%d).\n", (
{struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6563 goto error;
6564 }
6565 r = vce_v1_0_resume(rdev);
6566 if (r) {
6567 dev_err(rdev->dev, "failed VCE resume (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed VCE resume (%d).\n", (
{struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6568 goto error;
6569 }
6570 r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX6);
6571 if (r) {
6572 dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing VCE1 fences (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6573 goto error;
6574 }
6575 r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX7);
6576 if (r) {
6577 dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing VCE2 fences (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6578 goto error;
6579 }
6580 return;
6581
6582error:
6583 rdev->ring[TN_RING_TYPE_VCE1_INDEX6].ring_size = 0;
6584 rdev->ring[TN_RING_TYPE_VCE2_INDEX7].ring_size = 0;
6585}
6586
6587static void si_vce_resume(struct radeon_device *rdev)
6588{
6589 struct radeon_ring *ring;
6590 int r;
6591
6592 if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX6].ring_size)
6593 return;
6594
6595 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX6];
6596 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP0x00000000);
6597 if (r) {
6598 dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing VCE1 ring (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6599 return;
6600 }
6601 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX7];
6602 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP0x00000000);
6603 if (r) {
6604 dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing VCE2 ring (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6605 return;
6606 }
6607 r = vce_v1_0_init(rdev);
6608 if (r) {
6609 dev_err(rdev->dev, "failed initializing VCE (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing VCE (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6610 return;
6611 }
6612}
6613
6614static int si_startup(struct radeon_device *rdev)
6615{
6616 struct radeon_ring *ring;
6617 int r;
6618
6619 /* enable pcie gen2/3 link */
6620 si_pcie_gen3_enable(rdev);
6621 /* enable aspm */
6622 si_program_aspm(rdev);
6623
6624 /* scratch needs to be initialized before MC */
6625 r = r600_vram_scratch_init(rdev);
6626 if (r)
6627 return r;
6628
6629 si_mc_program(rdev);
6630
6631 if (!rdev->pm.dpm_enabled) {
6632 r = si_mc_load_microcode(rdev);
6633 if (r) {
6634 DRM_ERROR("Failed to load MC firmware!\n")__drm_err("Failed to load MC firmware!\n");
6635 return r;
6636 }
6637 }
6638
6639 r = si_pcie_gart_enable(rdev);
6640 if (r)
6641 return r;
6642 si_gpu_init(rdev);
6643
6644 /* allocate rlc buffers */
6645 if (rdev->family == CHIP_VERDE) {
6646 rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
6647 rdev->rlc.reg_list_size =
6648 (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list)(sizeof((verde_rlc_save_restore_register_list)) / sizeof((verde_rlc_save_restore_register_list
)[0]))
;
6649 }
6650 rdev->rlc.cs_data = si_cs_data;
6651 r = sumo_rlc_init(rdev);
6652 if (r) {
6653 DRM_ERROR("Failed to init rlc BOs!\n")__drm_err("Failed to init rlc BOs!\n");
6654 return r;
6655 }
6656
6657 /* allocate wb buffer */
6658 r = radeon_wb_init(rdev);
6659 if (r)
6660 return r;
6661
6662 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX0);
6663 if (r) {
6664 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing CP fences (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6665 return r;
6666 }
6667
6668 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX1);
6669 if (r) {
6670 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing CP fences (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6671 return r;
6672 }
6673
6674 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX2);
6675 if (r) {
6676 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing CP fences (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6677 return r;
6678 }
6679
6680 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX3);
6681 if (r) {
6682 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing DMA fences (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6683 return r;
6684 }
6685
6686 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX4);
6687 if (r) {
6688 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing DMA fences (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6689 return r;
6690 }
6691
6692 si_uvd_start(rdev);
6693 si_vce_start(rdev);
6694
6695 /* Enable IRQ */
6696 if (!rdev->irq.installed) {
6697 r = radeon_irq_kms_init(rdev);
6698 if (r)
6699 return r;
6700 }
6701
6702 r = si_irq_init(rdev);
6703 if (r) {
6704 DRM_ERROR("radeon: IH init failed (%d).\n", r)__drm_err("radeon: IH init failed (%d).\n", r);
6705 radeon_irq_kms_fini(rdev);
6706 return r;
6707 }
6708 si_irq_set(rdev);
6709
6710 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0];
6711 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET1024,
6712 RADEON_CP_PACKET20x80000000);
6713 if (r)
6714 return r;
6715
6716 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX1];
6717 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET1280,
6718 RADEON_CP_PACKET20x80000000);
6719 if (r)
6720 return r;
6721
6722 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX2];
6723 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET1536,
6724 RADEON_CP_PACKET20x80000000);
6725 if (r)
6726 return r;
6727
6728 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX3];
6729 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET1792,
6730 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)((((0xf) & 0xF) << 28) | (((0) & 0x1) << 26
) | (((0) & 0x1) << 23) | (((0) & 0x1) <<
22) | (((0) & 0xFFFFF) << 0))
);
6731 if (r)
6732 return r;
6733
6734 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX4];
6735 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET2304,
6736 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)((((0xf) & 0xF) << 28) | (((0) & 0x1) << 26
) | (((0) & 0x1) << 23) | (((0) & 0x1) <<
22) | (((0) & 0xFFFFF) << 0))
);
6737 if (r)
6738 return r;
6739
6740 r = si_cp_load_microcode(rdev);
6741 if (r)
6742 return r;
6743 r = si_cp_resume(rdev);
6744 if (r)
6745 return r;
6746
6747 r = cayman_dma_resume(rdev);
6748 if (r)
6749 return r;
6750
6751 si_uvd_resume(rdev);
6752 si_vce_resume(rdev);
6753
6754 r = radeon_ib_pool_init(rdev);
6755 if (r) {
6756 dev_err(rdev->dev, "IB initialization failed (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "IB initialization failed (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6757 return r;
6758 }
6759
6760 r = radeon_vm_manager_init(rdev);
6761 if (r) {
6762 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "vm manager initialization failed (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
6763 return r;
6764 }
6765
6766 r = radeon_audio_init(rdev);
6767 if (r)
6768 return r;
6769
6770 return 0;
6771}
6772
6773int si_resume(struct radeon_device *rdev)
6774{
6775 int r;
6776
6777 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
6778 * posting will perform the necessary tasks to bring the GPU back
6779 * into good shape.
6780 */
6781 /* post card */
6782 atom_asic_init(rdev->mode_info.atom_context);
6783
6784 /* init golden registers */
6785 si_init_golden_registers(rdev);
6786
6787 if (rdev->pm.pm_method == PM_METHOD_DPM)
6788 radeon_pm_resume(rdev);
6789
6790 rdev->accel_working = true1;
6791 r = si_startup(rdev);
6792 if (r) {
6793 DRM_ERROR("si startup failed on resume\n")__drm_err("si startup failed on resume\n");
6794 rdev->accel_working = false0;
6795 return r;
6796 }
6797
6798 return r;
6799
6800}
6801
6802int si_suspend(struct radeon_device *rdev)
6803{
6804 radeon_pm_suspend(rdev);
6805 radeon_audio_fini(rdev);
6806 radeon_vm_manager_fini(rdev);
6807 si_cp_enable(rdev, false0);
6808 cayman_dma_stop(rdev);
6809 if (rdev->has_uvd) {
6810 uvd_v1_0_fini(rdev);
6811 radeon_uvd_suspend(rdev);
6812 }
6813 if (rdev->has_vce)
6814 radeon_vce_suspend(rdev);
6815 si_fini_pg(rdev);
6816 si_fini_cg(rdev);
6817 si_irq_suspend(rdev);
6818 radeon_wb_disable(rdev);
6819 si_pcie_gart_disable(rdev);
6820 return 0;
6821}
6822
6823/* The plan is to move initialization into this function and use
6824 * helper functions so that radeon_device_init does pretty much
6825 * nothing more than call asic-specific functions. This should
6826 * also allow removing a bunch of callback functions, such as
6827 * vram_info.
6828 */
6829int si_init(struct radeon_device *rdev)
6830{
6831 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0];
6832 int r;
6833
6834 /* Read BIOS */
6835 if (!radeon_get_bios(rdev)) {
6836 if (ASIC_IS_AVIVO(rdev)((rdev->family >= CHIP_RS600)))
6837 return -EINVAL22;
6838 }
6839 /* Must be an ATOMBIOS */
6840 if (!rdev->is_atom_bios) {
6841 dev_err(rdev->dev, "Expecting atombios for cayman GPU\n")printf("drm:pid%d:%s *ERROR* " "Expecting atombios for cayman GPU\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
6842 return -EINVAL22;
6843 }
6844 r = radeon_atombios_init(rdev);
6845 if (r)
6846 return r;
6847
6848 /* Post card if necessary */
6849 if (!radeon_card_posted(rdev)) {
6850 if (!rdev->bios) {
6851 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n")printf("drm:pid%d:%s *ERROR* " "Card not posted and no BIOS - ignoring\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
6852 return -EINVAL22;
6853 }
6854 DRM_INFO("GPU not posted. posting now...\n")printk("\0016" "[" "drm" "] " "GPU not posted. posting now...\n"
)
;
6855 atom_asic_init(rdev->mode_info.atom_context);
6856 }
6857 /* init golden registers */
6858 si_init_golden_registers(rdev);
6859 /* Initialize scratch registers */
6860 si_scratch_init(rdev);
6861 /* Initialize surface registers */
6862 radeon_surface_init(rdev);
6863 /* Initialize clocks */
6864 radeon_get_clock_info(rdev->ddev);
6865
6866 /* Fence driver */
6867 r = radeon_fence_driver_init(rdev);
6868 if (r)
6869 return r;
6870
6871 /* initialize memory controller */
6872 r = si_mc_init(rdev);
6873 if (r)
6874 return r;
6875 /* Memory manager */
6876 r = radeon_bo_init(rdev);
6877 if (r)
6878 return r;
6879
6880 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
6881 !rdev->rlc_fw || !rdev->mc_fw) {
6882 r = si_init_microcode(rdev);
6883 if (r) {
6884 DRM_ERROR("Failed to load firmware!\n")__drm_err("Failed to load firmware!\n");
6885 return r;
6886 }
6887 }
6888
6889 /* Initialize power management */
6890 radeon_pm_init(rdev);
6891
6892 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0];
6893 ring->ring_obj = NULL((void *)0);
6894 r600_ring_init(rdev, ring, 1024 * 1024);
6895
6896 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX1];
6897 ring->ring_obj = NULL((void *)0);
6898 r600_ring_init(rdev, ring, 1024 * 1024);
6899
6900 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX2];
6901 ring->ring_obj = NULL((void *)0);
6902 r600_ring_init(rdev, ring, 1024 * 1024);
6903
6904 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX3];
6905 ring->ring_obj = NULL((void *)0);
6906 r600_ring_init(rdev, ring, 64 * 1024);
6907
6908 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX4];
6909 ring->ring_obj = NULL((void *)0);
6910 r600_ring_init(rdev, ring, 64 * 1024);
6911
6912 si_uvd_init(rdev);
6913 si_vce_init(rdev);
6914
6915 rdev->ih.ring_obj = NULL((void *)0);
6916 r600_ih_ring_init(rdev, 64 * 1024);
6917
6918 r = r600_pcie_gart_init(rdev);
6919 if (r)
6920 return r;
6921
6922 rdev->accel_working = true1;
6923 r = si_startup(rdev);
6924 if (r) {
6925 dev_err(rdev->dev, "disabling GPU acceleration\n")printf("drm:pid%d:%s *ERROR* " "disabling GPU acceleration\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
6926 si_cp_fini(rdev);
6927 cayman_dma_fini(rdev);
6928 si_irq_fini(rdev);
6929 sumo_rlc_fini(rdev);
6930 radeon_wb_fini(rdev);
6931 radeon_ib_pool_fini(rdev);
6932 radeon_vm_manager_fini(rdev);
6933 radeon_irq_kms_fini(rdev);
6934 si_pcie_gart_fini(rdev);
6935 rdev->accel_working = false0;
6936 }
6937
6938 /* Don't start up if the MC ucode is missing.
6939 * The default clocks and voltages before the MC ucode
6940 * is loaded are not sufficient for advanced operations.
6941 */
6942 if (!rdev->mc_fw) {
6943 DRM_ERROR("radeon: MC ucode required for NI+.\n")__drm_err("radeon: MC ucode required for NI+.\n");
6944 return -EINVAL22;
6945 }
6946
6947 return 0;
6948}
6949
6950void si_fini(struct radeon_device *rdev)
6951{
6952 radeon_pm_fini(rdev);
6953 si_cp_fini(rdev);
6954 cayman_dma_fini(rdev);
6955 si_fini_pg(rdev);
6956 si_fini_cg(rdev);
6957 si_irq_fini(rdev);
6958 sumo_rlc_fini(rdev);
6959 radeon_wb_fini(rdev);
6960 radeon_vm_manager_fini(rdev);
6961 radeon_ib_pool_fini(rdev);
6962 radeon_irq_kms_fini(rdev);
6963 if (rdev->has_uvd) {
6964 uvd_v1_0_fini(rdev);
6965 radeon_uvd_fini(rdev);
6966 }
6967 if (rdev->has_vce)
6968 radeon_vce_fini(rdev);
6969 si_pcie_gart_fini(rdev);
6970 r600_vram_scratch_fini(rdev);
6971 radeon_gem_fini(rdev);
6972 radeon_fence_driver_fini(rdev);
6973 radeon_bo_fini(rdev);
6974 radeon_atombios_fini(rdev);
6975 kfree(rdev->bios);
6976 rdev->bios = NULL((void *)0);
6977}
6978
6979/**
6980 * si_get_gpu_clock_counter - return GPU clock counter snapshot
6981 *
6982 * @rdev: radeon_device pointer
6983 *
6984 * Fetches a GPU clock counter snapshot (SI).
6985 * Returns the 64 bit clock counter snapshot.
6986 */
6987uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
6988{
6989 uint64_t clock;
6990
6991 mutex_lock(&rdev->gpu_clock_mutex)rw_enter_write(&rdev->gpu_clock_mutex);
6992 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1)r100_mm_wreg(rdev, (0xC340), (1), 0);
6993 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB)r100_mm_rreg(rdev, (0xC338), 0) |
6994 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB)r100_mm_rreg(rdev, (0xC33C), 0) << 32ULL);
6995 mutex_unlock(&rdev->gpu_clock_mutex)rw_exit_write(&rdev->gpu_clock_mutex);
6996 return clock;
6997}
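/*
 * A minimal usage sketch (illustrative only, not part of si.c): measuring an
 * elapsed GPU clock delta around some work with si_get_gpu_clock_counter().
 * The work in the middle is a hypothetical placeholder.
 */
static inline uint64_t si_gpu_clock_delta_sketch(struct radeon_device *rdev)
{
	uint64_t start, end;

	start = si_get_gpu_clock_counter(rdev);	/* 64-bit snapshot taken under gpu_clock_mutex */
	/* ... submit or wait for some GPU work here ... */
	end = si_get_gpu_clock_counter(rdev);

	/* unsigned subtraction keeps the delta correct across counter wrap-around */
	return end - start;
}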
6998
6999int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
7000{
7001 unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
7002 int r;
7003
7004 /* bypass vclk and dclk with bclk */
7005 WREG32_P(CG_UPLL_FUNC_CNTL_2,do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x638), 0); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((1) << 20) | (
(1) << 25)) & ~(~(0x01F00000 | 0x3E000000))); r100_mm_wreg
(rdev, (0x638), (tmp_), 0); } while (0)
7006 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x638), 0); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((1) << 20) | (
(1) << 25)) & ~(~(0x01F00000 | 0x3E000000))); r100_mm_wreg
(rdev, (0x638), (tmp_), 0); } while (0)
7007 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK))do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x638), 0); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((1) << 20) | (
(1) << 25)) & ~(~(0x01F00000 | 0x3E000000))); r100_mm_wreg
(rdev, (0x638), (tmp_), 0); } while (0)
;
7008
7009 /* put PLL in bypass mode */
7010 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x634), 0); tmp_ &=
(~0x00000004); tmp_ |= ((0x00000004) & ~(~0x00000004)); r100_mm_wreg
(rdev, (0x634), (tmp_), 0); } while (0)
;
7011
7012 if (!vclk || !dclk) {
7013 /* keep the Bypass mode */
7014 return 0;
7015 }
7016
7017 r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
7018 16384, 0x03FFFFFF, 0, 128, 5,
7019 &fb_div, &vclk_div, &dclk_div);
7020 if (r)
7021 return r;
7022
7023 /* set RESET_ANTI_MUX to 0 */
7024 WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x648), 0); tmp_ &=
(~0x00000200); tmp_ |= ((0) & ~(~0x00000200)); r100_mm_wreg
(rdev, (0x648), (tmp_), 0); } while (0)
;
7025
7026 /* set VCO_MODE to 1 */
7027 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x634), 0); tmp_ &=
(~0x00000600); tmp_ |= ((0x00000600) & ~(~0x00000600)); r100_mm_wreg
(rdev, (0x634), (tmp_), 0); } while (0)
;
7028
7029 /* disable sleep mode */
7030 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x634), 0); tmp_ &=
(~0x00000002); tmp_ |= ((0) & ~(~0x00000002)); r100_mm_wreg
(rdev, (0x634), (tmp_), 0); } while (0)
;
7031
7032 /* deassert UPLL_RESET */
7033 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x634), 0); tmp_ &=
(~0x00000001); tmp_ |= ((0) & ~(~0x00000001)); r100_mm_wreg
(rdev, (0x634), (tmp_), 0); } while (0)
;
7034
7035 mdelay(1);
7036
7037 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL0x634);
7038 if (r)
7039 return r;
7040
7041 /* assert UPLL_RESET again */
7042 WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x634), 0); tmp_ &=
(~0x00000001); tmp_ |= ((0x00000001) & ~(~0x00000001)); r100_mm_wreg
(rdev, (0x634), (tmp_), 0); } while (0)
;
7043
7044 /* disable spread spectrum. */
7045 WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x650), 0); tmp_ &=
(~0x00000001); tmp_ |= ((0) & ~(~0x00000001)); r100_mm_wreg
(rdev, (0x650), (tmp_), 0); } while (0)
;
7046
7047 /* set feedback divider */
7048 WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x63C), 0); tmp_ &=
(~0x01FFFFFF); tmp_ |= ((((fb_div) << 0)) & ~(~0x01FFFFFF
)); r100_mm_wreg(rdev, (0x63C), (tmp_), 0); } while (0)
;
7049
7050 /* set ref divider to 0 */
7051 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x634), 0); tmp_ &=
(~0x003F0000); tmp_ |= ((0) & ~(~0x003F0000)); r100_mm_wreg
(rdev, (0x634), (tmp_), 0); } while (0)
;
7052
7053 if (fb_div < 307200)
7054 WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x644), 0); tmp_ &=
(~0x00020000); tmp_ |= ((0) & ~(~0x00020000)); r100_mm_wreg
(rdev, (0x644), (tmp_), 0); } while (0)
;
7055 else
7056 WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x644), 0); tmp_ &=
(~0x00020000); tmp_ |= ((0x00020000) & ~(~0x00020000)); r100_mm_wreg
(rdev, (0x644), (tmp_), 0); } while (0)
;
7057
7058 /* set PDIV_A and PDIV_B */
7059 WREG32_P(CG_UPLL_FUNC_CNTL_2,do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x638), 0); tmp_ &=
(~(0x0000007F | 0x00007F00)); tmp_ |= ((((vclk_div) <<
0) | ((dclk_div) << 8)) & ~(~(0x0000007F | 0x00007F00
))); r100_mm_wreg(rdev, (0x638), (tmp_), 0); } while (0)
7060 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x638), 0); tmp_ &=
(~(0x0000007F | 0x00007F00)); tmp_ |= ((((vclk_div) <<
0) | ((dclk_div) << 8)) & ~(~(0x0000007F | 0x00007F00
))); r100_mm_wreg(rdev, (0x638), (tmp_), 0); } while (0)
7061 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK))do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x638), 0); tmp_ &=
(~(0x0000007F | 0x00007F00)); tmp_ |= ((((vclk_div) <<
0) | ((dclk_div) << 8)) & ~(~(0x0000007F | 0x00007F00
))); r100_mm_wreg(rdev, (0x638), (tmp_), 0); } while (0)
;
7062
7063 /* give the PLL some time to settle */
7064 mdelay(15);
7065
7066 /* deassert PLL_RESET */
7067 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x634), 0); tmp_ &=
(~0x00000001); tmp_ |= ((0) & ~(~0x00000001)); r100_mm_wreg
(rdev, (0x634), (tmp_), 0); } while (0)
;
7068
7069 mdelay(15);
7070
7071 /* switch from bypass mode to normal mode */
7072 WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x634), 0); tmp_ &=
(~0x00000004); tmp_ |= ((0) & ~(~0x00000004)); r100_mm_wreg
(rdev, (0x634), (tmp_), 0); } while (0)
;
7073
7074 r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL0x634);
7075 if (r)
7076 return r;
7077
7078 /* switch VCLK and DCLK selection */
7079 WREG32_P(CG_UPLL_FUNC_CNTL_2,do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x638), 0); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((2) << 20) | (
(2) << 25)) & ~(~(0x01F00000 | 0x3E000000))); r100_mm_wreg
(rdev, (0x638), (tmp_), 0); } while (0)
7080 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x638), 0); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((2) << 20) | (
(2) << 25)) & ~(~(0x01F00000 | 0x3E000000))); r100_mm_wreg
(rdev, (0x638), (tmp_), 0); } while (0)
7081 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK))do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x638), 0); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((2) << 20) | (
(2) << 25)) & ~(~(0x01F00000 | 0x3E000000))); r100_mm_wreg
(rdev, (0x638), (tmp_), 0); } while (0)
;
7082
7083 mdelay(100);
7084
7085 return 0;
7086}
7087
7088static void si_pcie_gen3_enable(struct radeon_device *rdev)
7089{
7090 struct pci_dev *root = rdev->pdev->bus->self;
7091 enum pci_bus_speed speed_cap;
7092 u32 speed_cntl, current_data_rate;
7093 int i;
7094 u16 tmp16;
7095
7096 if (pci_is_root_bus(rdev->pdev->bus))
7097 return;
7098
7099 if (radeon_pcie_gen2 == 0)
7100 return;
7101
7102 if (rdev->flags & RADEON_IS_IGP)
7103 return;
7104
7105 if (!(rdev->flags & RADEON_IS_PCIE))
7106 return;
7107
7108 speed_cap = pcie_get_speed_cap(root);
7109 if (speed_cap == PCI_SPEED_UNKNOWN)
7110 return;
7111
7112 if ((speed_cap != PCIE_SPEED_8_0GT) &&
7113 (speed_cap != PCIE_SPEED_5_0GT))
7114 return;
7115
7116 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL)rdev->pciep_rreg(rdev, (0xa4));
7117 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK(0x3 << 13)) >>
7118 LC_CURRENT_DATA_RATE_SHIFT13;
7119 if (speed_cap == PCIE_SPEED_8_0GT) {
7120 if (current_data_rate == 2) {
7121 DRM_INFO("PCIE gen 3 link speeds already enabled\n")printk("\0016" "[" "drm" "] " "PCIE gen 3 link speeds already enabled\n"
)
;
7122 return;
7123 }
7124 DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n")printk("\0016" "[" "drm" "] " "enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n"
)
;
7125 } else if (speed_cap == PCIE_SPEED_5_0GT) {
7126 if (current_data_rate == 1) {
7127 DRM_INFO("PCIE gen 2 link speeds already enabled\n")printk("\0016" "[" "drm" "] " "PCIE gen 2 link speeds already enabled\n"
)
;
7128 return;
7129 }
7130 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n")printk("\0016" "[" "drm" "] " "enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"
)
;
7131 }
7132
7133 if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
7134 return;
7135
7136 if (speed_cap == PCIE_SPEED_8_0GT) {
7137 /* re-try equalization if gen3 is not already enabled */
7138 if (current_data_rate != 2) {
7139 u16 bridge_cfg, gpu_cfg;
7140 u16 bridge_cfg2, gpu_cfg2;
7141 u32 max_lw, current_lw, tmp;
7142
7143 pcie_capability_read_word(root, PCI_EXP_LNKCTL0x10,
7144 &bridge_cfg);
7145 pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL0x10,
7146 &gpu_cfg);
7147
7148 tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD0x0200;
7149 pcie_capability_write_word(root, PCI_EXP_LNKCTL0x10, tmp16);
7150
7151 tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD0x0200;
7152 pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL0x10,
7153 tmp16);
7154
7155 tmp = RREG32_PCIE(PCIE_LC_STATUS1)rv370_pcie_rreg(rdev, (0x28));
7156 max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK(0x7 << 5)) >> LC_DETECTED_LINK_WIDTH_SHIFT5;
7157 current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK(0x7 << 2)) >> LC_OPERATING_LINK_WIDTH_SHIFT2;
7158
7159 if (current_lw < max_lw) {
7160 tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL)rdev->pciep_rreg(rdev, (0xa2));
7161 if (tmp & LC_RENEGOTIATION_SUPPORT(1 << 9)) {
7162 tmp &= ~(LC_LINK_WIDTH_MASK0x7 | LC_UPCONFIGURE_DIS(1 << 13));
7163 tmp |= (max_lw << LC_LINK_WIDTH_SHIFT0);
7164 tmp |= LC_UPCONFIGURE_SUPPORT(1 << 12) | LC_RENEGOTIATE_EN(1 << 10) | LC_RECONFIG_NOW(1 << 8);
7165 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp)rdev->pciep_wreg(rdev, (0xa2), (tmp));
7166 }
7167 }
7168
7169 for (i = 0; i < 10; i++) {
7170 /* check status */
7171 pcie_capability_read_word(rdev->pdev,
7172 PCI_EXP_DEVSTA0x0a,
7173 &tmp16);
7174 if (tmp16 & PCI_EXP_DEVSTA_TRPND0x0020)
7175 break;
7176
7177 pcie_capability_read_word(root, PCI_EXP_LNKCTL0x10,
7178 &bridge_cfg);
7179 pcie_capability_read_word(rdev->pdev,
7180 PCI_EXP_LNKCTL0x10,
7181 &gpu_cfg);
7182
7183 pcie_capability_read_word(root, PCI_EXP_LNKCTL20x30,
7184 &bridge_cfg2);
7185 pcie_capability_read_word(rdev->pdev,
7186 PCI_EXP_LNKCTL20x30,
7187 &gpu_cfg2);
7188
7189 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4)rdev->pciep_rreg(rdev, (0xb6));
7190 tmp |= LC_SET_QUIESCE(1 << 13);
7191 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp)rdev->pciep_wreg(rdev, (0xb6), (tmp));
7192
7193 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4)rdev->pciep_rreg(rdev, (0xb6));
7194 tmp |= LC_REDO_EQ(1 << 5);
7195 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp)rdev->pciep_wreg(rdev, (0xb6), (tmp));
7196
7197 drm_msleep(100)mdelay(100);
7198
7199 /* linkctl */
7200 pcie_capability_read_word(root, PCI_EXP_LNKCTL0x10,
7201 &tmp16);
7202 tmp16 &= ~PCI_EXP_LNKCTL_HAWD0x0200;
7203 tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD0x0200);
7204 pcie_capability_write_word(root,
7205 PCI_EXP_LNKCTL0x10,
7206 tmp16);
7207
7208 pcie_capability_read_word(rdev->pdev,
7209 PCI_EXP_LNKCTL0x10,
7210 &tmp16);
7211 tmp16 &= ~PCI_EXP_LNKCTL_HAWD0x0200;
7212 tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD0x0200);
7213 pcie_capability_write_word(rdev->pdev,
7214 PCI_EXP_LNKCTL0x10,
7215 tmp16);
7216
7217 /* linkctl2 */
7218 pcie_capability_read_word(root, PCI_EXP_LNKCTL20x30,
7219 &tmp16);
7220 tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP0x0010 |
7221 PCI_EXP_LNKCTL2_TX_MARGIN0x0380);
7222 tmp16 |= (bridge_cfg2 &
7223 (PCI_EXP_LNKCTL2_ENTER_COMP0x0010 |
7224 PCI_EXP_LNKCTL2_TX_MARGIN0x0380));
7225 pcie_capability_write_word(root,
7226 PCI_EXP_LNKCTL20x30,
7227 tmp16);
7228
7229 pcie_capability_read_word(rdev->pdev,
7230 PCI_EXP_LNKCTL20x30,
7231 &tmp16);
7232 tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP0x0010 |
7233 PCI_EXP_LNKCTL2_TX_MARGIN0x0380);
7234 tmp16 |= (gpu_cfg2 &
7235 (PCI_EXP_LNKCTL2_ENTER_COMP0x0010 |
7236 PCI_EXP_LNKCTL2_TX_MARGIN0x0380));
7237 pcie_capability_write_word(rdev->pdev,
7238 PCI_EXP_LNKCTL20x30,
7239 tmp16);
7240
7241 tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4)rdev->pciep_rreg(rdev, (0xb6));
7242 tmp &= ~LC_SET_QUIESCE(1 << 13);
7243 WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp)rdev->pciep_wreg(rdev, (0xb6), (tmp));
7244 }
7245 }
7246 }
7247
7248 /* set the link speed */
7249 speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE(1 << 5) | LC_FORCE_DIS_HW_SPEED_CHANGE(1 << 8);
7250 speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE(1 << 6);
7251 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl)rdev->pciep_wreg(rdev, (0xa4), (speed_cntl));
7252
7253 pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL20x30, &tmp16);
7254 tmp16 &= ~PCI_EXP_LNKCTL2_TLS0x0000000f;
7255 if (speed_cap == PCIE_SPEED_8_0GT)
7256 tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT0x00000003; /* gen3 */
7257 else if (speed_cap == PCIE_SPEED_5_0GT)
7258 tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT0x00000002; /* gen2 */
7259 else
7260 tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT0x00000001; /* gen1 */
7261 pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL20x30, tmp16);
7262
7263 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL)rdev->pciep_rreg(rdev, (0xa4));
7264 speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE(1 << 9);
7265 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl)rdev->pciep_wreg(rdev, (0xa4), (speed_cntl));
7266
7267 for (i = 0; i < rdev->usec_timeout; i++) {
7268 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL)rdev->pciep_rreg(rdev, (0xa4));
7269 if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE(1 << 9)) == 0)
7270 break;
7271 udelay(1);
7272 }
7273}
7274
7275static void si_program_aspm(struct radeon_device *rdev)
7276{
7277 u32 data, orig;
7278 bool_Bool disable_l0s = false0, disable_l1 = false0, disable_plloff_in_l1 = false0;
7279 bool_Bool disable_clkreq = false0;
7280
7281 if (radeon_aspm == 0)
7282 return;
7283
7284 if (!(rdev->flags & RADEON_IS_PCIE))
7285 return;
7286
7287 orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL)rdev->pciep_rreg(rdev, (0xa3));
7288 data &= ~LC_XMIT_N_FTS_MASK(0xff << 0);
7289 data |= LC_XMIT_N_FTS(0x24)((0x24) << 0) | LC_XMIT_N_FTS_OVERRIDE_EN(1 << 8);
7290 if (orig != data)
7291 WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data)rdev->pciep_wreg(rdev, (0xa3), (data));
7292
7293 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3)rdev->pciep_rreg(rdev, (0xb5));
7294 data |= LC_GO_TO_RECOVERY(1 << 30);
7295 if (orig != data)
7296 WREG32_PCIE_PORT(PCIE_LC_CNTL3, data)rdev->pciep_wreg(rdev, (0xb5), (data));
7297
7298 orig = data = RREG32_PCIE(PCIE_P_CNTL)rv370_pcie_rreg(rdev, (0x40));
7299 data |= P_IGNORE_EDB_ERR(1 << 6);
7300 if (orig != data)
7301 WREG32_PCIE(PCIE_P_CNTL, data)rv370_pcie_wreg(rdev, (0x40), (data));
7302
7303 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL)rdev->pciep_rreg(rdev, (0xa0));
7304 data &= ~(LC_L0S_INACTIVITY_MASK(0xf << 8) | LC_L1_INACTIVITY_MASK(0xf << 12));
7305 data |= LC_PMI_TO_L1_DIS(1 << 16);
7306 if (!disable_l0s)
7307 data |= LC_L0S_INACTIVITY(7)((7) << 8);
7308
7309 if (!disable_l1) {
7310 data |= LC_L1_INACTIVITY(7)((7) << 12);
7311 data &= ~LC_PMI_TO_L1_DIS(1 << 16);
7312 if (orig != data)
7313 WREG32_PCIE_PORT(PCIE_LC_CNTL, data)rdev->pciep_wreg(rdev, (0xa0), (data));
7314
7315 if (!disable_plloff_in_l1) {
7316 bool_Bool clk_req_support;
7317
7318 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0)eg_pif_phy0_rreg(rdev, (0x12));
7319 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK(0x7 << 10) | PLL_POWER_STATE_IN_TXS2_0_MASK(0x7 << 7));
7320 data |= PLL_POWER_STATE_IN_OFF_0(7)((7) << 10) | PLL_POWER_STATE_IN_TXS2_0(7)((7) << 7);
7321 if (orig != data)
7322 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data)eg_pif_phy0_wreg(rdev, (0x12), (data));
7323
7324 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1)eg_pif_phy0_rreg(rdev, (0x13));
7325 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK(0x7 << 10) | PLL_POWER_STATE_IN_TXS2_1_MASK(0x7 << 7));
7326 data |= PLL_POWER_STATE_IN_OFF_1(7)((7) << 10) | PLL_POWER_STATE_IN_TXS2_1(7)((7) << 7);
7327 if (orig != data)
7328 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data)eg_pif_phy0_wreg(rdev, (0x13), (data));
7329
7330 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0)eg_pif_phy1_rreg(rdev, (0x12));
7331 data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK(0x7 << 10) | PLL_POWER_STATE_IN_TXS2_0_MASK(0x7 << 7));
7332 data |= PLL_POWER_STATE_IN_OFF_0(7)((7) << 10) | PLL_POWER_STATE_IN_TXS2_0(7)((7) << 7);
7333 if (orig != data)
7334 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data)eg_pif_phy1_wreg(rdev, (0x12), (data));
7335
7336 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1)eg_pif_phy1_rreg(rdev, (0x13));
7337 data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK(0x7 << 10) | PLL_POWER_STATE_IN_TXS2_1_MASK(0x7 << 7));
7338 data |= PLL_POWER_STATE_IN_OFF_1(7)((7) << 10) | PLL_POWER_STATE_IN_TXS2_1(7)((7) << 7);
7339 if (orig != data)
7340 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data)eg_pif_phy1_wreg(rdev, (0x13), (data));
7341
7342 if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
7343 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0)eg_pif_phy0_rreg(rdev, (0x12));
7344 data &= ~PLL_RAMP_UP_TIME_0_MASK(0x7 << 24);
7345 if (orig != data)
7346 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data)eg_pif_phy0_wreg(rdev, (0x12), (data));
7347
7348 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1)eg_pif_phy0_rreg(rdev, (0x13));
7349 data &= ~PLL_RAMP_UP_TIME_1_MASK(0x7 << 24);
7350 if (orig != data)
7351 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data)eg_pif_phy0_wreg(rdev, (0x13), (data));
7352
7353 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2)eg_pif_phy0_rreg(rdev, (0x17));
7354 data &= ~PLL_RAMP_UP_TIME_2_MASK(0x7 << 24);
7355 if (orig != data)
7356 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data)eg_pif_phy0_wreg(rdev, (0x17), (data));
7357
7358 orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3)eg_pif_phy0_rreg(rdev, (0x18));
7359 data &= ~PLL_RAMP_UP_TIME_3_MASK(0x7 << 24);
7360 if (orig != data)
7361 WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data)eg_pif_phy0_wreg(rdev, (0x18), (data));
7362
7363 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0)eg_pif_phy1_rreg(rdev, (0x12));
7364 data &= ~PLL_RAMP_UP_TIME_0_MASK(0x7 << 24);
7365 if (orig != data)
7366 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data)eg_pif_phy1_wreg(rdev, (0x12), (data));
7367
7368 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1)eg_pif_phy1_rreg(rdev, (0x13));
7369 data &= ~PLL_RAMP_UP_TIME_1_MASK(0x7 << 24);
7370 if (orig != data)
7371 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data)eg_pif_phy1_wreg(rdev, (0x13), (data));
7372
7373 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2)eg_pif_phy1_rreg(rdev, (0x17));
7374 data &= ~PLL_RAMP_UP_TIME_2_MASK(0x7 << 24);
7375 if (orig != data)
7376 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data)eg_pif_phy1_wreg(rdev, (0x17), (data));
7377
7378 orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3)eg_pif_phy1_rreg(rdev, (0x18));
7379 data &= ~PLL_RAMP_UP_TIME_3_MASK(0x7 << 24);
7380 if (orig != data)
7381 WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data)eg_pif_phy1_wreg(rdev, (0x18), (data));
7382 }
7383 orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL)rdev->pciep_rreg(rdev, (0xa2));
7384 data &= ~LC_DYN_LANES_PWR_STATE_MASK(0x3 << 21);
7385 data |= LC_DYN_LANES_PWR_STATE(3)((3) << 21);
7386 if (orig != data)
7387 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data)rdev->pciep_wreg(rdev, (0xa2), (data));
7388
7389 orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL)eg_pif_phy0_rreg(rdev, (0x10));
7390 data &= ~LS2_EXIT_TIME_MASK(0x7 << 17);
7391 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7392 data |= LS2_EXIT_TIME(5)((5) << 17);
7393 if (orig != data)
7394 WREG32_PIF_PHY0(PB0_PIF_CNTL, data)eg_pif_phy0_wreg(rdev, (0x10), (data));
7395
7396 orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL)eg_pif_phy1_rreg(rdev, (0x10));
7397 data &= ~LS2_EXIT_TIME_MASK(0x7 << 17);
7398 if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7399 data |= LS2_EXIT_TIME(5)((5) << 17);
7400 if (orig != data)
7401 WREG32_PIF_PHY1(PB1_PIF_CNTL, data)eg_pif_phy1_wreg(rdev, (0x10), (data));
7402
7403 if (!disable_clkreq &&
7404 !pci_is_root_bus(rdev->pdev->bus)) {
7405 struct pci_dev *root = rdev->pdev->bus->self;
7406 u32 lnkcap;
7407
7408 clk_req_support = false0;
7409 pcie_capability_read_dword(root, PCI_EXP_LNKCAP0x0c, &lnkcap);
7410 if (lnkcap & PCI_EXP_LNKCAP_CLKPM0x00040000)
7411 clk_req_support = true1;
7412 } else {
7413 clk_req_support = false0;
7414 }
7415
7416 if (clk_req_support) {
7417 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2)rdev->pciep_rreg(rdev, (0xb1));
7418 data |= LC_ALLOW_PDWN_IN_L1(1 << 17) | LC_ALLOW_PDWN_IN_L23(1 << 18);
7419 if (orig != data)
7420 WREG32_PCIE_PORT(PCIE_LC_CNTL2, data)rdev->pciep_wreg(rdev, (0xb1), (data));
7421
7422 orig = data = RREG32(THM_CLK_CNTL)r100_mm_rreg(rdev, (0x66c), 0);
7423 data &= ~(CMON_CLK_SEL_MASK0xFF | TMON_CLK_SEL_MASK0xFF00);
7424 data |= CMON_CLK_SEL(1)((1) << 0) | TMON_CLK_SEL(1)((1) << 8);
7425 if (orig != data)
7426 WREG32(THM_CLK_CNTL, data)r100_mm_wreg(rdev, (0x66c), (data), 0);
7427
7428 orig = data = RREG32(MISC_CLK_CNTL)r100_mm_rreg(rdev, (0x670), 0);
7429 data &= ~(DEEP_SLEEP_CLK_SEL_MASK0xFF | ZCLK_SEL_MASK0xFF00);
7430 data |= DEEP_SLEEP_CLK_SEL(1)((1) << 0) | ZCLK_SEL(1)((1) << 8);
7431 if (orig != data)
7432 WREG32(MISC_CLK_CNTL, data)r100_mm_wreg(rdev, (0x670), (data), 0);
7433
7434 orig = data = RREG32(CG_CLKPIN_CNTL)r100_mm_rreg(rdev, (0x660), 0);
7435 data &= ~BCLK_AS_XCLK(1 << 2);
7436 if (orig != data)
7437 WREG32(CG_CLKPIN_CNTL, data)r100_mm_wreg(rdev, (0x660), (data), 0);
7438
7439 orig = data = RREG32(CG_CLKPIN_CNTL_2)r100_mm_rreg(rdev, (0x664), 0);
7440 data &= ~FORCE_BIF_REFCLK_EN(1 << 3);
7441 if (orig != data)
7442 WREG32(CG_CLKPIN_CNTL_2, data)r100_mm_wreg(rdev, (0x664), (data), 0);
7443
7444 orig = data = RREG32(MPLL_BYPASSCLK_SEL)r100_mm_rreg(rdev, (0x65c), 0);
7445 data &= ~MPLL_CLKOUT_SEL_MASK0xFF00;
7446 data |= MPLL_CLKOUT_SEL(4)((4) << 8);
7447 if (orig != data)
7448 WREG32(MPLL_BYPASSCLK_SEL, data)r100_mm_wreg(rdev, (0x65c), (data), 0);
7449
7450 orig = data = RREG32(SPLL_CNTL_MODE)r100_mm_rreg(rdev, (0x618), 0);
7451 data &= ~SPLL_REFCLK_SEL_MASK(3 << 26);
7452 if (orig != data)
7453 WREG32(SPLL_CNTL_MODE, data)r100_mm_wreg(rdev, (0x618), (data), 0);
7454 }
7455 }
7456 } else {
7457 if (orig != data)
7458 WREG32_PCIE_PORT(PCIE_LC_CNTL, data)rdev->pciep_wreg(rdev, (0xa0), (data));
7459 }
7460
7461 orig = data = RREG32_PCIE(PCIE_CNTL2)rv370_pcie_rreg(rdev, (0x1c));
7462 data |= SLV_MEM_LS_EN(1 << 16) | MST_MEM_LS_EN(1 << 18) | REPLAY_MEM_LS_EN(1 << 19);
7463 if (orig != data)
7464 WREG32_PCIE(PCIE_CNTL2, data)rv370_pcie_wreg(rdev, (0x1c), (data));
7465
7466 if (!disable_l0s) {
7467 data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL)rdev->pciep_rreg(rdev, (0xa3));
7468 if((data & LC_N_FTS_MASK(0xff << 24)) == LC_N_FTS_MASK(0xff << 24)) {
7469 data = RREG32_PCIE(PCIE_LC_STATUS1)rv370_pcie_rreg(rdev, (0x28));
7470 if ((data & LC_REVERSE_XMIT(1 << 1)) && (data & LC_REVERSE_RCVR(1 << 0))) {
7471 orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL)rdev->pciep_rreg(rdev, (0xa0));
7472 data &= ~LC_L0S_INACTIVITY_MASK(0xf << 8);
7473 if (orig != data)
7474 WREG32_PCIE_PORT(PCIE_LC_CNTL, data)rdev->pciep_wreg(rdev, (0xa0), (data));
7475 }
7476 }
7477 }
7478}
7479
7480static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
7481{
7482 unsigned i;
7483
7484 /* make sure VCEPLL_CTLREQ is deasserted */
7485 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000008); tmp_ |= ((0) & ~(~0x00000008)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7486
7487 mdelay(10);
7488
7489 /* assert UPLL_CTLREQ */
7490 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000008); tmp_ |= ((0x00000008) & ~(~0x00000008)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7491
7492 /* wait for CTLACK and CTLACK2 to get asserted */
7493 for (i = 0; i < 100; ++i) {
7494 uint32_t mask = UPLL_CTLACK_MASK0x40000000 | UPLL_CTLACK2_MASK0x80000000;
7495 if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL)tn_smc_rreg(rdev, (0xc0030600)) & mask) == mask)
7496 break;
7497 mdelay(10);
7498 }
7499
7500 /* deassert UPLL_CTLREQ */
7501 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000008); tmp_ |= ((0) & ~(~0x00000008)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7502
7503 if (i == 100) {
 7504 DRM_ERROR("Timeout setting VCE clocks!\n")__drm_err("Timeout setting VCE clocks!\n");
7505 return -ETIMEDOUT60;
7506 }
7507
7508 return 0;
7509}
7510
7511int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
7512{
7513 unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
7514 int r;
7515
7516 /* bypass evclk and ecclk with bclk */
7517 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030601)); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((1) << 20) | (
(1) << 25)) & ~(~(0x01F00000 | 0x3E000000))); tn_smc_wreg
(rdev, (0xc0030601), (tmp_)); } while (0)
7518 EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030601)); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((1) << 20) | (
(1) << 25)) & ~(~(0x01F00000 | 0x3E000000))); tn_smc_wreg
(rdev, (0xc0030601), (tmp_)); } while (0)
7519 ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK))do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030601)); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((1) << 20) | (
(1) << 25)) & ~(~(0x01F00000 | 0x3E000000))); tn_smc_wreg
(rdev, (0xc0030601), (tmp_)); } while (0)
;
7520
7521 /* put PLL in bypass mode */
7522 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000004); tmp_ |= ((0x00000004) & ~(~0x00000004)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
7523 ~VCEPLL_BYPASS_EN_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000004); tmp_ |= ((0x00000004) & ~(~0x00000004)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7524
7525 if (!evclk || !ecclk) {
7526 /* keep the Bypass mode, put PLL to sleep */
7527 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000002); tmp_ |= ((0x00000002) & ~(~0x00000002)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
7528 ~VCEPLL_SLEEP_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000002); tmp_ |= ((0x00000002) & ~(~0x00000002)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7529 return 0;
7530 }
7531
7532 r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
7533 16384, 0x03FFFFFF, 0, 128, 5,
7534 &fb_div, &evclk_div, &ecclk_div);
7535 if (r)
7536 return r;
7537
7538 /* set RESET_ANTI_MUX to 0 */
7539 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030604)); tmp_ &=
(~0x00000200); tmp_ |= ((0) & ~(~0x00000200)); tn_smc_wreg
(rdev, (0xc0030604), (tmp_)); } while (0)
;
7540
7541 /* set VCO_MODE to 1 */
7542 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000600); tmp_ |= ((0x00000600) & ~(~0x00000600)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
7543 ~VCEPLL_VCO_MODE_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000600); tmp_ |= ((0x00000600) & ~(~0x00000600)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7544
7545 /* toggle VCEPLL_SLEEP to 1 then back to 0 */
7546 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000002); tmp_ |= ((0x00000002) & ~(~0x00000002)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
7547 ~VCEPLL_SLEEP_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000002); tmp_ |= ((0x00000002) & ~(~0x00000002)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7548 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000002); tmp_ |= ((0) & ~(~0x00000002)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7549
7550 /* deassert VCEPLL_RESET */
7551 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000001); tmp_ |= ((0) & ~(~0x00000001)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7552
7553 mdelay(1);
7554
7555 r = si_vce_send_vcepll_ctlreq(rdev);
7556 if (r)
7557 return r;
7558
7559 /* assert VCEPLL_RESET again */
7560 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000001); tmp_ |= ((0x00000001) & ~(~0x00000001)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7561
7562 /* disable spread spectrum. */
7563 WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030606)); tmp_ &=
(~0x00000001); tmp_ |= ((0) & ~(~0x00000001)); tn_smc_wreg
(rdev, (0xc0030606), (tmp_)); } while (0)
;
7564
7565 /* set feedback divider */
7566 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030602)); tmp_ &=
(~0x01FFFFFF); tmp_ |= ((((fb_div) << 0)) & ~(~0x01FFFFFF
)); tn_smc_wreg(rdev, (0xc0030602), (tmp_)); } while (0)
;
7567
7568 /* set ref divider to 0 */
7569 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x003F0000); tmp_ |= ((0) & ~(~0x003F0000)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7570
7571 /* set PDIV_A and PDIV_B */
7572 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030601)); tmp_ &=
(~(0x0000007F | 0x00007F00)); tmp_ |= ((((evclk_div) <<
0) | ((ecclk_div) << 8)) & ~(~(0x0000007F | 0x00007F00
))); tn_smc_wreg(rdev, (0xc0030601), (tmp_)); } while (0)
7573 VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030601)); tmp_ &=
(~(0x0000007F | 0x00007F00)); tmp_ |= ((((evclk_div) <<
0) | ((ecclk_div) << 8)) & ~(~(0x0000007F | 0x00007F00
))); tn_smc_wreg(rdev, (0xc0030601), (tmp_)); } while (0)
7574 ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK))do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030601)); tmp_ &=
(~(0x0000007F | 0x00007F00)); tmp_ |= ((((evclk_div) <<
0) | ((ecclk_div) << 8)) & ~(~(0x0000007F | 0x00007F00
))); tn_smc_wreg(rdev, (0xc0030601), (tmp_)); } while (0)
;
7575
7576 /* give the PLL some time to settle */
7577 mdelay(15);
7578
7579 /* deassert PLL_RESET */
7580 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000001); tmp_ |= ((0) & ~(~0x00000001)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7581
7582 mdelay(15);
7583
7584 /* switch from bypass mode to normal mode */
7585 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK)do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030600)); tmp_ &=
(~0x00000004); tmp_ |= ((0) & ~(~0x00000004)); tn_smc_wreg
(rdev, (0xc0030600), (tmp_)); } while (0)
;
7586
7587 r = si_vce_send_vcepll_ctlreq(rdev);
7588 if (r)
7589 return r;
7590
 7591 /* switch EVCLK and ECCLK selection */
7592 WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030601)); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((16) << 20) |
((16) << 25)) & ~(~(0x01F00000 | 0x3E000000))); tn_smc_wreg
(rdev, (0xc0030601), (tmp_)); } while (0)
7593 EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030601)); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((16) << 20) |
((16) << 25)) & ~(~(0x01F00000 | 0x3E000000))); tn_smc_wreg
(rdev, (0xc0030601), (tmp_)); } while (0)
7594 ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK))do { uint32_t tmp_ = tn_smc_rreg(rdev, (0xc0030601)); tmp_ &=
(~(0x01F00000 | 0x3E000000)); tmp_ |= ((((16) << 20) |
((16) << 25)) & ~(~(0x01F00000 | 0x3E000000))); tn_smc_wreg
(rdev, (0xc0030601), (tmp_)); } while (0)
;
7595
7596 mdelay(100);
7597
7598 return 0;
7599}

/usr/src/sys/dev/pci/drm/radeon/radeon.h

1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RADEON_H__
29#define __RADEON_H__
30
 31/* TODO: Here are things that need to be done:
 32 * - surface allocator & initializer: (a bit like scratch reg) should
 33 * initialize HDP_ stuff on RS600, R600, R700 hw, well anything
 34 * related to surfaces
 35 * - WB: write back stuff (do it a bit like the scratch reg things)
 36 * - Vblank: look at Jesse's rework and what we should do
 37 * - r600/r700: gart & cp
 38 * - cs: clean cs ioctl, use bitmap & things like that.
 39 * - power management stuff
 40 * - Barrier in gart code
 41 * - Unmappable vram?
 42 * - TESTING, TESTING, TESTING
43 */
44
45/* Initialization path:
46 * We expect that acceleration initialization might fail for various
 47 * reasons even though we work hard to make it work on most
 48 * configurations. In order to still have a working userspace in such
 49 * a situation the init path must succeed up to the memory controller
 50 * initialization point. Failures before this point are considered
 51 * fatal errors. Here is the init callchain:
 52 * radeon_device_init performs common structure and mutex initialization
 53 * asic_init sets up the GPU memory layout and performs all
 54 * one time initialization (failures in this
 55 * function are considered fatal)
 56 * asic_startup sets up the GPU acceleration; in order to
 57 * follow the guideline the first thing this
 58 * function should do is set up the GPU
 59 * memory controller (only MC setup failures
 60 * are considered fatal)
61 */
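/*
 * A minimal sketch (illustrative only, not part of radeon.h) of the error
 * handling the callchain above implies; <asic>_init() and <asic>_startup()
 * stand for the per-ASIC steps. Compare si_init() earlier in this report,
 * which clears accel_working when si_startup() fails.
 */
static inline int radeon_init_path_sketch(struct radeon_device *rdev)
{
	int r;

	r = 0; /* r = <asic>_init(rdev);  one-time setup, any failure is fatal */
	if (r)
		return r;			/* no working userspace possible */

	r = 0; /* r = <asic>_startup(rdev);  acceleration bring-up */
	if (r) {
		/* past the MC point: keep modesetting, drop acceleration */
		rdev->accel_working = false;
	}
	return 0;
}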
62
63#include <linux/atomic.h>
64#include <linux/wait.h>
65#include <linux/list.h>
66#include <linux/kref.h>
67#include <linux/interval_tree.h>
68#include <linux/hashtable.h>
69#include <linux/dma-fence.h>
70
71#ifdef CONFIG_MMU_NOTIFIER
72#include <linux/mmu_notifier.h>
73#endif
74
75#include <drm/ttm/ttm_bo_api.h>
76#include <drm/ttm/ttm_bo_driver.h>
77#include <drm/ttm/ttm_placement.h>
78#include <drm/ttm/ttm_module.h>
79#include <drm/ttm/ttm_execbuf_util.h>
80
81#include <drm/drm_gem.h>
82#include <drm/drm_legacy.h>
83
84#include <dev/wscons/wsconsio.h>
85#include <dev/wscons/wsdisplayvar.h>
86#include <dev/rasops/rasops.h>
87
88#include <dev/pci/pcivar.h>
89
90#ifdef __sparc64__
91#include <machine/fbvar.h>
92#endif
93
94#include "radeon_family.h"
95#include "radeon_mode.h"
96#include "radeon_reg.h"
97
98/*
99 * Modules parameters.
100 */
101extern int radeon_no_wb;
102extern int radeon_modeset;
103extern int radeon_dynclks;
104extern int radeon_r4xx_atom;
105extern int radeon_agpmode;
106extern int radeon_vram_limit;
107extern int radeon_gart_size;
108extern int radeon_benchmarking;
109extern int radeon_testing;
110extern int radeon_connector_table;
111extern int radeon_tv;
112extern int radeon_audio;
113extern int radeon_disp_priority;
114extern int radeon_hw_i2c;
115extern int radeon_pcie_gen2;
116extern int radeon_msi;
117extern int radeon_lockup_timeout;
118extern int radeon_fastfb;
119extern int radeon_dpm;
120extern int radeon_aspm;
121extern int radeon_runtime_pm;
122extern int radeon_hard_reset;
123extern int radeon_vm_size;
124extern int radeon_vm_block_size;
125extern int radeon_deep_color;
126extern int radeon_use_pflipirq;
127extern int radeon_bapm;
128extern int radeon_backlight;
129extern int radeon_auxch;
130extern int radeon_mst;
131extern int radeon_uvd;
132extern int radeon_vce;
133extern int radeon_si_support;
134extern int radeon_cik_support;
135
136/*
137 * Copy from radeon_drv.h so we don't have to include both and have conflicting
138 * symbol;
139 */
140#define RADEON_MAX_USEC_TIMEOUT100000 100000 /* 100 ms */
141#define RADEON_FENCE_JIFFIES_TIMEOUT(hz / 2) (HZhz / 2)
142#define RADEON_USEC_IB_TEST_TIMEOUT1000000 1000000 /* 1s */
143/* RADEON_IB_POOL_SIZE must be a power of 2 */
144#define RADEON_IB_POOL_SIZE16 16
145#define RADEON_DEBUGFS_MAX_COMPONENTS32 32
146#define RADEONFB_CONN_LIMIT4 4
147#define RADEON_BIOS_NUM_SCRATCH8 8
148
149/* internal ring indices */
150/* r1xx+ has gfx CP ring */
151#define RADEON_RING_TYPE_GFX_INDEX0 0
152
153/* cayman has 2 compute CP rings */
154#define CAYMAN_RING_TYPE_CP1_INDEX1 1
155#define CAYMAN_RING_TYPE_CP2_INDEX2 2
156
157/* R600+ has an async dma ring */
158#define R600_RING_TYPE_DMA_INDEX3 3
 160/* cayman adds a second async dma ring */
160#define CAYMAN_RING_TYPE_DMA1_INDEX4 4
161
162/* R600+ */
163#define R600_RING_TYPE_UVD_INDEX5 5
164
165/* TN+ */
166#define TN_RING_TYPE_VCE1_INDEX6 6
167#define TN_RING_TYPE_VCE2_INDEX7 7
168
169/* max number of rings */
170#define RADEON_NUM_RINGS8 8
171
172/* number of hw syncs before falling back on blocking */
173#define RADEON_NUM_SYNCS4 4
174
 175/* hardcode those limits for now */
176#define RADEON_VA_IB_OFFSET(1 << 20) (1 << 20)
177#define RADEON_VA_RESERVED_SIZE(8 << 20) (8 << 20)
178#define RADEON_IB_VM_MAX_SIZE(64 << 10) (64 << 10)
179
180/* hard reset data */
181#define RADEON_ASIC_RESET_DATA0x39d5e86b 0x39d5e86b
182
183/* reset flags */
184#define RADEON_RESET_GFX(1 << 0) (1 << 0)
185#define RADEON_RESET_COMPUTE(1 << 1) (1 << 1)
186#define RADEON_RESET_DMA(1 << 2) (1 << 2)
187#define RADEON_RESET_CP(1 << 3) (1 << 3)
188#define RADEON_RESET_GRBM(1 << 4) (1 << 4)
189#define RADEON_RESET_DMA1(1 << 5) (1 << 5)
190#define RADEON_RESET_RLC(1 << 6) (1 << 6)
191#define RADEON_RESET_SEM(1 << 7) (1 << 7)
192#define RADEON_RESET_IH(1 << 8) (1 << 8)
193#define RADEON_RESET_VMC(1 << 9) (1 << 9)
194#define RADEON_RESET_MC(1 << 10) (1 << 10)
195#define RADEON_RESET_DISPLAY(1 << 11) (1 << 11)
196
197/* CG block flags */
198#define RADEON_CG_BLOCK_GFX(1 << 0) (1 << 0)
199#define RADEON_CG_BLOCK_MC(1 << 1) (1 << 1)
200#define RADEON_CG_BLOCK_SDMA(1 << 2) (1 << 2)
201#define RADEON_CG_BLOCK_UVD(1 << 3) (1 << 3)
202#define RADEON_CG_BLOCK_VCE(1 << 4) (1 << 4)
203#define RADEON_CG_BLOCK_HDP(1 << 5) (1 << 5)
204#define RADEON_CG_BLOCK_BIF(1 << 6) (1 << 6)
205
206/* CG flags */
207#define RADEON_CG_SUPPORT_GFX_MGCG(1 << 0) (1 << 0)
208#define RADEON_CG_SUPPORT_GFX_MGLS(1 << 1) (1 << 1)
209#define RADEON_CG_SUPPORT_GFX_CGCG(1 << 2) (1 << 2)
210#define RADEON_CG_SUPPORT_GFX_CGLS(1 << 3) (1 << 3)
211#define RADEON_CG_SUPPORT_GFX_CGTS(1 << 4) (1 << 4)
212#define RADEON_CG_SUPPORT_GFX_CGTS_LS(1 << 5) (1 << 5)
213#define RADEON_CG_SUPPORT_GFX_CP_LS(1 << 6) (1 << 6)
214#define RADEON_CG_SUPPORT_GFX_RLC_LS(1 << 7) (1 << 7)
215#define RADEON_CG_SUPPORT_MC_LS(1 << 8) (1 << 8)
216#define RADEON_CG_SUPPORT_MC_MGCG(1 << 9) (1 << 9)
217#define RADEON_CG_SUPPORT_SDMA_LS(1 << 10) (1 << 10)
218#define RADEON_CG_SUPPORT_SDMA_MGCG(1 << 11) (1 << 11)
219#define RADEON_CG_SUPPORT_BIF_LS(1 << 12) (1 << 12)
220#define RADEON_CG_SUPPORT_UVD_MGCG(1 << 13) (1 << 13)
221#define RADEON_CG_SUPPORT_VCE_MGCG(1 << 14) (1 << 14)
222#define RADEON_CG_SUPPORT_HDP_LS(1 << 15) (1 << 15)
223#define RADEON_CG_SUPPORT_HDP_MGCG(1 << 16) (1 << 16)
224
225/* PG flags */
226#define RADEON_PG_SUPPORT_GFX_PG(1 << 0) (1 << 0)
227#define RADEON_PG_SUPPORT_GFX_SMG(1 << 1) (1 << 1)
228#define RADEON_PG_SUPPORT_GFX_DMG(1 << 2) (1 << 2)
229#define RADEON_PG_SUPPORT_UVD(1 << 3) (1 << 3)
230#define RADEON_PG_SUPPORT_VCE(1 << 4) (1 << 4)
231#define RADEON_PG_SUPPORT_CP(1 << 5) (1 << 5)
232#define RADEON_PG_SUPPORT_GDS(1 << 6) (1 << 6)
233#define RADEON_PG_SUPPORT_RLC_SMU_HS(1 << 7) (1 << 7)
234#define RADEON_PG_SUPPORT_SDMA(1 << 8) (1 << 8)
235#define RADEON_PG_SUPPORT_ACP(1 << 9) (1 << 9)
236#define RADEON_PG_SUPPORT_SAMU(1 << 10) (1 << 10)
237
238/* max cursor sizes (in pixels) */
239#define CURSOR_WIDTH64 64
240#define CURSOR_HEIGHT64 64
241
242#define CIK_CURSOR_WIDTH128 128
243#define CIK_CURSOR_HEIGHT128 128
244
245/*
246 * Errata workarounds.
247 */
248enum radeon_pll_errata {
249 CHIP_ERRATA_R300_CG = 0x00000001,
250 CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
251 CHIP_ERRATA_PLL_DELAY = 0x00000004
252};
253
254
255struct radeon_device;
256
257
258/*
259 * BIOS.
260 */
261bool_Bool radeon_get_bios(struct radeon_device *rdev);
262
263/*
264 * Dummy page
265 */
266struct radeon_dummy_page {
267 uint64_t entry;
268 struct drm_dmamem *dmah;
269 dma_addr_t addr;
270};
271int radeon_dummy_page_init(struct radeon_device *rdev);
272void radeon_dummy_page_fini(struct radeon_device *rdev);
273
274
275/*
276 * Clocks
277 */
278struct radeon_clock {
279 struct radeon_pll p1pll;
280 struct radeon_pll p2pll;
281 struct radeon_pll dcpll;
282 struct radeon_pll spll;
283 struct radeon_pll mpll;
284 /* 10 Khz units */
285 uint32_t default_mclk;
286 uint32_t default_sclk;
287 uint32_t default_dispclk;
288 uint32_t current_dispclk;
289 uint32_t dp_extclk;
290 uint32_t max_pixel_clock;
291 uint32_t vco_freq;
292};
293
294/*
295 * Power management
296 */
297int radeon_pm_init(struct radeon_device *rdev);
298int radeon_pm_late_init(struct radeon_device *rdev);
299void radeon_pm_fini(struct radeon_device *rdev);
300void radeon_pm_compute_clocks(struct radeon_device *rdev);
301void radeon_pm_suspend(struct radeon_device *rdev);
302void radeon_pm_resume(struct radeon_device *rdev);
303void radeon_combios_get_power_modes(struct radeon_device *rdev);
304void radeon_atombios_get_power_modes(struct radeon_device *rdev);
305int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
306 u8 clock_type,
307 u32 clock,
308 bool_Bool strobe_mode,
309 struct atom_clock_dividers *dividers);
310int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
311 u32 clock,
312 bool_Bool strobe_mode,
313 struct atom_mpll_param *mpll_param);
314void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
315int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
316 u16 voltage_level, u8 voltage_type,
317 u32 *gpio_value, u32 *gpio_mask);
318void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
319 u32 eng_clock, u32 mem_clock);
320int radeon_atom_get_voltage_step(struct radeon_device *rdev,
321 u8 voltage_type, u16 *voltage_step);
322int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
323 u16 voltage_id, u16 *voltage);
324int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
325 u16 *voltage,
326 u16 leakage_idx);
327int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
328 u16 *leakage_id);
329int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
330 u16 *vddc, u16 *vddci,
331 u16 virtual_voltage_id,
332 u16 vbios_voltage_id);
333int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
334 u16 virtual_voltage_id,
335 u16 *voltage);
336int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
337 u8 voltage_type,
338 u16 nominal_voltage,
339 u16 *true_voltage);
340int radeon_atom_get_min_voltage(struct radeon_device *rdev,
341 u8 voltage_type, u16 *min_voltage);
342int radeon_atom_get_max_voltage(struct radeon_device *rdev,
343 u8 voltage_type, u16 *max_voltage);
344int radeon_atom_get_voltage_table(struct radeon_device *rdev,
345 u8 voltage_type, u8 voltage_mode,
346 struct atom_voltage_table *voltage_table);
347bool_Bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
348 u8 voltage_type, u8 voltage_mode);
349int radeon_atom_get_svi2_info(struct radeon_device *rdev,
350 u8 voltage_type,
351 u8 *svd_gpio_id, u8 *svc_gpio_id);
352void radeon_atom_update_memory_dll(struct radeon_device *rdev,
353 u32 mem_clock);
354void radeon_atom_set_ac_timing(struct radeon_device *rdev,
355 u32 mem_clock);
356int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
357 u8 module_index,
358 struct atom_mc_reg_table *reg_table);
359int radeon_atom_get_memory_info(struct radeon_device *rdev,
360 u8 module_index, struct atom_memory_info *mem_info);
361int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
362 bool_Bool gddr5, u8 module_index,
363 struct atom_memory_clock_range_table *mclk_range_table);
364int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
365 u16 voltage_id, u16 *voltage);
366void rs690_pm_info(struct radeon_device *rdev);
367extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
368 unsigned *bankh, unsigned *mtaspect,
369 unsigned *tile_split);
370
371/*
372 * Fences.
373 */
374struct radeon_fence_driver {
375 struct radeon_device *rdev;
376 uint32_t scratch_reg;
377 uint64_t gpu_addr;
378 volatile uint32_t *cpu_addr;
379 /* sync_seq is protected by ring emission lock */
380 uint64_t sync_seq[RADEON_NUM_RINGS8];
381 atomic64_t last_seq;
382 bool_Bool initialized, delayed_irq;
383 struct delayed_work lockup_work;
384};
385
386struct radeon_fence {
387 struct dma_fence base;
388
389 struct radeon_device *rdev;
390 uint64_t seq;
391 /* RB, DMA, etc. */
392 unsigned ring;
393 bool_Bool is_vm_update;
394
395 wait_queue_entry_t fence_wake;
396};
397
398int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
399int radeon_fence_driver_init(struct radeon_device *rdev);
400void radeon_fence_driver_fini(struct radeon_device *rdev);
401void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
402int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
403void radeon_fence_process(struct radeon_device *rdev, int ring);
404bool_Bool radeon_fence_signaled(struct radeon_fence *fence);
405long radeon_fence_wait_timeout(struct radeon_fence *fence, bool_Bool interruptible, long timeout);
406int radeon_fence_wait(struct radeon_fence *fence, bool_Bool interruptible);
407int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
408int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
409int radeon_fence_wait_any(struct radeon_device *rdev,
410 struct radeon_fence **fences,
411 bool_Bool intr);
412struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
413void radeon_fence_unref(struct radeon_fence **fence);
414unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
415bool_Bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
416void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
417static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
418 struct radeon_fence *b)
419{
420 if (!a) {
421 return b;
422 }
423
424 if (!b) {
425 return a;
426 }
427
428 BUG_ON(a->ring != b->ring)((!(a->ring != b->ring)) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/dev/pci/drm/radeon/radeon.h", 428, "!(a->ring != b->ring)"
))
;
429
430 if (a->seq > b->seq) {
431 return a;
432 } else {
433 return b;
434 }
435}
436
437static inline bool_Bool radeon_fence_is_earlier(struct radeon_fence *a,
438 struct radeon_fence *b)
439{
440 if (!a) {
441 return false0;
442 }
443
444 if (!b) {
445 return true1;
446 }
447
448 BUG_ON(a->ring != b->ring)((!(a->ring != b->ring)) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/dev/pci/drm/radeon/radeon.h", 448, "!(a->ring != b->ring)"
))
;
449
450 return a->seq < b->seq;
451}
452
453/*
454 * Tiling registers
455 */
456struct radeon_surface_reg {
457 struct radeon_bo *bo;
458};
459
460#define RADEON_GEM_MAX_SURFACES8 8
461
462/*
463 * TTM.
464 */
465struct radeon_mman {
466 struct ttm_bo_device bdev;
467 bool_Bool initialized;
468
469#if defined(CONFIG_DEBUG_FS)
470 struct dentry *vram;
471 struct dentry *gtt;
472#endif
473};
474
475struct radeon_bo_list {
476 struct radeon_bo *robj;
477 struct ttm_validate_buffer tv;
478 uint64_t gpu_offset;
479 unsigned preferred_domains;
480 unsigned allowed_domains;
481 uint32_t tiling_flags;
482};
483
484/* bo virtual address in a specific vm */
485struct radeon_bo_va {
486 /* protected by bo being reserved */
487 struct list_head bo_list;
488 uint32_t flags;
489 struct radeon_fence *last_pt_update;
490 unsigned ref_count;
491
492 /* protected by vm mutex */
493 struct interval_tree_node it;
494 struct list_head vm_status;
495
496 /* constant after initialization */
497 struct radeon_vm *vm;
498 struct radeon_bo *bo;
499};
500
501struct radeon_bo {
502 /* Protected by gem.mutex */
503 struct list_head list;
504 /* Protected by tbo.reserved */
505 u32 initial_domain;
506 struct ttm_place placements[4];
507 struct ttm_placement placement;
508 struct ttm_buffer_object tbo;
509 struct ttm_bo_kmap_obj kmap;
510 u32 flags;
511 unsigned pin_count;
512 void *kptr;
513 u32 tiling_flags;
514 u32 pitch;
515 int surface_reg;
516 unsigned prime_shared_count;
 517 /* list of all virtual addresses to which this bo
 518 * is associated
519 */
520 struct list_head va;
521 /* Constant after initialization */
522 struct radeon_device *rdev;
523
524 struct ttm_bo_kmap_obj dma_buf_vmap;
525 pid_t pid;
526
527#ifdef CONFIG_MMU_NOTIFIER
528 struct mmu_interval_notifier notifier;
529#endif
530};
531#define gem_to_radeon_bo(gobj)({ const __typeof( ((struct radeon_bo *)0)->tbo.base ) *__mptr
= ((gobj)); (struct radeon_bo *)( (char *)__mptr - __builtin_offsetof
(struct radeon_bo, tbo.base) );})
container_of((gobj), struct radeon_bo, tbo.base)({ const __typeof( ((struct radeon_bo *)0)->tbo.base ) *__mptr
= ((gobj)); (struct radeon_bo *)( (char *)__mptr - __builtin_offsetof
(struct radeon_bo, tbo.base) );})
532
533int radeon_gem_debugfs_init(struct radeon_device *rdev);
534
 535/* sub-allocation manager; it has to be protected by another lock.
 536 * By design this is a helper for other parts of the driver
 537 * like the indirect buffer or semaphore, which both have their own
 538 * locking.
 539 *
 540 * The principle is simple: we keep a list of sub allocations in offset
 541 * order (first entry has offset == 0, last entry has the highest
 542 * offset).
 543 *
 544 * When allocating a new object we first check if there is room at
 545 * the end: total_size - (last_object_offset + last_object_size) >=
 546 * alloc_size. If so we allocate the new object there (see the check
 547 * sketched after the struct below).
 548 * When there is not enough room at the end, we start waiting for
 549 * each sub object until we reach object_offset + object_size >=
 550 * alloc_size; this object then becomes the sub object we return.
 551 *
 552 * Alignment can't be bigger than page size.
 553 *
 554 * Holes are not considered for allocation to keep things simple.
 555 * The assumption is that there won't be holes (all objects have the
 556 * same alignment).
557 */
558struct radeon_sa_manager {
559 wait_queue_head_t wq;
560 struct radeon_bo *bo;
561 struct list_head *hole;
562 struct list_head flist[RADEON_NUM_RINGS8];
563 struct list_head olist;
564 unsigned size;
565 uint64_t gpu_addr;
566 void *cpu_ptr;
567 uint32_t domain;
568 uint32_t align;
569};
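/*
 * A minimal sketch (illustrative only, not part of radeon.h) of the
 * "room at the end" test described in the sub-allocation comment above.
 * last_offset and last_size stand for the last entry of the offset-ordered
 * list and are hypothetical parameters; the sketch assumes that entry
 * already fits within sa_manager->size.
 */
static inline bool radeon_sa_fits_at_end_sketch(struct radeon_sa_manager *sa_manager,
						unsigned last_offset,
						unsigned last_size,
						unsigned alloc_size)
{
	/* free space left between the last sub allocation and the end of the buffer */
	return (sa_manager->size - (last_offset + last_size)) >= alloc_size;
}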
570
571struct radeon_sa_bo;
572
573/* sub-allocation buffer */
574struct radeon_sa_bo {
575 struct list_head olist;
576 struct list_head flist;
577 struct radeon_sa_manager *manager;
578 unsigned soffset;
579 unsigned eoffset;
580 struct radeon_fence *fence;
581};
582
583/*
584 * GEM objects.
585 */
586struct radeon_gem {
587 struct rwlock mutex;
588 struct list_head objects;
589};
590
591int radeon_gem_init(struct radeon_device *rdev);
592void radeon_gem_fini(struct radeon_device *rdev);
593int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
594 int alignment, int initial_domain,
595 u32 flags, bool_Bool kernel,
596 struct drm_gem_object **obj);
597
598int radeon_mode_dumb_create(struct drm_file *file_priv,
599 struct drm_device *dev,
600 struct drm_mode_create_dumb *args);
601int radeon_mode_dumb_mmap(struct drm_file *filp,
602 struct drm_device *dev,
603 uint32_t handle, uint64_t *offset_p);
604
605/*
606 * Semaphores.
607 */
608struct radeon_semaphore {
609 struct radeon_sa_bo *sa_bo;
610 signed waiters;
611 uint64_t gpu_addr;
612};
613
614int radeon_semaphore_create(struct radeon_device *rdev,
615 struct radeon_semaphore **semaphore);
616bool_Bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
617 struct radeon_semaphore *semaphore);
618bool_Bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
619 struct radeon_semaphore *semaphore);
620void radeon_semaphore_free(struct radeon_device *rdev,
621 struct radeon_semaphore **semaphore,
622 struct radeon_fence *fence);
623
624/*
625 * Synchronization
626 */
627struct radeon_sync {
628 struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS4];
629 struct radeon_fence *sync_to[RADEON_NUM_RINGS8];
630 struct radeon_fence *last_vm_update;
631};
632
633void radeon_sync_create(struct radeon_sync *sync);
634void radeon_sync_fence(struct radeon_sync *sync,
635 struct radeon_fence *fence);
636int radeon_sync_resv(struct radeon_device *rdev,
637 struct radeon_sync *sync,
638 struct dma_resv *resv,
639 bool_Bool shared);
640int radeon_sync_rings(struct radeon_device *rdev,
641 struct radeon_sync *sync,
642 int waiting_ring);
643void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
644 struct radeon_fence *fence);
645
646/*
647 * GART structures, functions & helpers
648 */
649struct radeon_mc;
650
651#define RADEON_GPU_PAGE_SIZE4096 4096
652#define RADEON_GPU_PAGE_MASK(4096 - 1) (RADEON_GPU_PAGE_SIZE4096 - 1)
653#define RADEON_GPU_PAGE_SHIFT12 12
654#define RADEON_GPU_PAGE_ALIGN(a)(((a) + (4096 - 1)) & ~(4096 - 1)) (((a) + RADEON_GPU_PAGE_MASK(4096 - 1)) & ~RADEON_GPU_PAGE_MASK(4096 - 1))
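/*
 * Worked example (illustrative only): RADEON_GPU_PAGE_ALIGN() rounds a size
 * up to the next 4096-byte GPU page boundary.
 *	RADEON_GPU_PAGE_ALIGN(1)    == 4096
 *	RADEON_GPU_PAGE_ALIGN(4096) == 4096
 *	RADEON_GPU_PAGE_ALIGN(5000) == 8192
 */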
655
656#define RADEON_GART_PAGE_DUMMY0 0
657#define RADEON_GART_PAGE_VALID(1 << 0) (1 << 0)
658#define RADEON_GART_PAGE_READ(1 << 1) (1 << 1)
659#define RADEON_GART_PAGE_WRITE(1 << 2) (1 << 2)
660#define RADEON_GART_PAGE_SNOOP(1 << 3) (1 << 3)
661
662struct radeon_gart {
663 dma_addr_t table_addr;
664 struct drm_dmamem *dmah;
665 struct radeon_bo *robj;
666 void *ptr;
667 unsigned num_gpu_pages;
668 unsigned num_cpu_pages;
669 unsigned table_size;
670 struct vm_page **pages;
671 uint64_t *pages_entry;
672 bool_Bool ready;
673};
674
675int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
676void radeon_gart_table_ram_free(struct radeon_device *rdev);
677int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
678void radeon_gart_table_vram_free(struct radeon_device *rdev);
679int radeon_gart_table_vram_pin(struct radeon_device *rdev);
680void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
681int radeon_gart_init(struct radeon_device *rdev);
682void radeon_gart_fini(struct radeon_device *rdev);
683void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
684 int pages);
685int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
686 int pages, struct vm_page **pagelist,
687 dma_addr_t *dma_addr, uint32_t flags);
688
689
690/*
691 * GPU MC structures, functions & helpers
692 */
693struct radeon_mc {
694 resource_size_t aper_size;
695 resource_size_t aper_base;
696 resource_size_t agp_base;
697 /* for some chips with <= 32MB we need to lie
698 * about vram size near mc fb location */
699 u64 mc_vram_size;
700 u64 visible_vram_size;
701 u64 gtt_size;
702 u64 gtt_start;
703 u64 gtt_end;
704 u64 vram_start;
705 u64 vram_end;
706 unsigned vram_width;
707 u64 real_vram_size;
708 int vram_mtrr;
709 bool_Bool vram_is_ddr;
710 bool_Bool igp_sideport_enabled;
711 u64 gtt_base_align;
712 u64 mc_mask;
713};
714
715bool_Bool radeon_combios_sideport_present(struct radeon_device *rdev);
716bool_Bool radeon_atombios_sideport_present(struct radeon_device *rdev);
717
718/*
719 * GPU scratch registers structures, functions & helpers
720 */
721struct radeon_scratch {
722 unsigned num_reg;
723 uint32_t reg_base;
724 bool_Bool free[32];
725 uint32_t reg[32];
726};
727
728int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
729void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
730
731/*
732 * GPU doorbell structures, functions & helpers
733 */
734#define RADEON_MAX_DOORBELLS1024 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */
735
736struct radeon_doorbell {
737 /* doorbell mmio */
738 resource_size_t base;
739 resource_size_t size;
740 u32 __iomem *ptr;
741 bus_space_handle_t bsh;
742 u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
743 DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS)unsigned long used[((((1024)) + ((8 * sizeof(long)) - 1)) / (8 * sizeof(long)))];
744};
745
746int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
747void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
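The used bitmap in struct radeon_doorbell tracks which of the (at most 1024) doorbell slots have been handed out, and radeon_doorbell_get()/radeon_doorbell_free() above are the allocator around it. A stand-alone sketch of the same first-fit bitmap pattern in plain C, without kernel helpers (the driver itself would go through find_first_zero_bit() and friends; the helper below is hand-rolled purely for illustration):

#include <stdio.h>
#include <limits.h>

#define MAX_DOORBELLS 1024
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITMAP_LONGS ((MAX_DOORBELLS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long used[BITMAP_LONGS];

/* Return the first clear bit, or MAX_DOORBELLS if every slot is taken. */
static unsigned find_first_zero(void)
{
	for (unsigned i = 0; i < MAX_DOORBELLS; i++)
		if (!(used[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return i;
	return MAX_DOORBELLS;
}

static int doorbell_get(unsigned *slot)
{
	unsigned i = find_first_zero();
	if (i >= MAX_DOORBELLS)
		return -1;	/* all slots in use */
	used[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
	*slot = i;
	return 0;
}

static void doorbell_free(unsigned slot)
{
	if (slot < MAX_DOORBELLS)
		used[slot / BITS_PER_LONG] &= ~(1UL << (slot % BITS_PER_LONG));
}

int main(void)
{
	unsigned a, b;
	if (doorbell_get(&a) == 0 && doorbell_get(&b) == 0)
		printf("allocated slots %u and %u\n", a, b);
	doorbell_free(a);
	return 0;
}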
748
749/*
750 * IRQS.
751 */
752
753struct radeon_flip_work {
754 struct work_struct flip_work;
755 struct work_struct unpin_work;
756 struct radeon_device *rdev;
757 int crtc_id;
758 u32 target_vblank;
759 uint64_t base;
760 struct drm_pending_vblank_event *event;
761 struct radeon_bo *old_rbo;
762 struct dma_fence *fence;
763 bool_Bool async;
764};
765
766struct r500_irq_stat_regs {
767 u32 disp_int;
768 u32 hdmi0_status;
769};
770
771struct r600_irq_stat_regs {
772 u32 disp_int;
773 u32 disp_int_cont;
774 u32 disp_int_cont2;
775 u32 d1grph_int;
776 u32 d2grph_int;
777 u32 hdmi0_status;
778 u32 hdmi1_status;
779};
780
781struct evergreen_irq_stat_regs {
782 u32 disp_int[6];
783 u32 grph_int[6];
784 u32 afmt_status[6];
785};
786
787struct cik_irq_stat_regs {
788 u32 disp_int;
789 u32 disp_int_cont;
790 u32 disp_int_cont2;
791 u32 disp_int_cont3;
792 u32 disp_int_cont4;
793 u32 disp_int_cont5;
794 u32 disp_int_cont6;
795 u32 d1grph_int;
796 u32 d2grph_int;
797 u32 d3grph_int;
798 u32 d4grph_int;
799 u32 d5grph_int;
800 u32 d6grph_int;
801};
802
803union radeon_irq_stat_regs {
804 struct r500_irq_stat_regs r500;
805 struct r600_irq_stat_regs r600;
806 struct evergreen_irq_stat_regs evergreen;
807 struct cik_irq_stat_regs cik;
808};
809
810struct radeon_irq {
811 bool_Bool installed;
812 spinlock_t lock;
813 atomic_t ring_int[RADEON_NUM_RINGS8];
814 bool_Bool crtc_vblank_int[RADEON_MAX_CRTCS6];
815 atomic_t pflip[RADEON_MAX_CRTCS6];
816 wait_queue_head_t vblank_queue;
817 bool_Bool hpd[RADEON_MAX_HPD_PINS7];
818 bool_Bool afmt[RADEON_MAX_AFMT_BLOCKS7];
819 union radeon_irq_stat_regs stat_regs;
820 bool_Bool dpm_thermal;
821};
822
823int radeon_irq_kms_init(struct radeon_device *rdev);
824void radeon_irq_kms_fini(struct radeon_device *rdev);
825void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
826bool_Bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
827void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
828void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
829void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
830void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
831void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
832void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
833void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
834
835/*
836 * CP & rings.
837 */
838
839struct radeon_ib {
840 struct radeon_sa_bo *sa_bo;
841 uint32_t length_dw;
842 uint64_t gpu_addr;
843 uint32_t *ptr;
844 int ring;
845 struct radeon_fence *fence;
846 struct radeon_vm *vm;
847 bool_Bool is_const_ib;
848 struct radeon_sync sync;
849};
850
851struct radeon_ring {
852 struct radeon_bo *ring_obj;
853 volatile uint32_t *ring;
854 unsigned rptr_offs;
855 unsigned rptr_save_reg;
856 u64 next_rptr_gpu_addr;
857 volatile u32 *next_rptr_cpu_addr;
858 unsigned wptr;
859 unsigned wptr_old;
860 unsigned ring_size;
861 unsigned ring_free_dw;
862 int count_dw;
863 atomic_t last_rptr;
864 atomic64_t last_activity;
865 uint64_t gpu_addr;
866 uint32_t align_mask;
867 uint32_t ptr_mask;
868 bool_Bool ready;
869 u32 nop;
870 u32 idx;
871 u64 last_semaphore_signal_addr;
872 u64 last_semaphore_wait_addr;
873 /* for CIK queues */
874 u32 me;
875 u32 pipe;
876 u32 queue;
877 struct radeon_bo *mqd_obj;
878 u32 doorbell_index;
879 unsigned wptr_offs;
880};
881
882struct radeon_mec {
883 struct radeon_bo *hpd_eop_obj;
884 u64 hpd_eop_gpu_addr;
885 u32 num_pipe;
886 u32 num_mec;
887 u32 num_queue;
888};
889
890/*
891 * VM
892 */
893
894/* maximum number of VMIDs */
895#define RADEON_NUM_VM16 16
896
897/* number of entries in page table */
898#define RADEON_VM_PTE_COUNT(1 << radeon_vm_block_size) (1 << radeon_vm_block_size)
899
900/* PTBs (Page Table Blocks) need to be aligned to 32K */
901#define RADEON_VM_PTB_ALIGN_SIZE32768 32768
902#define RADEON_VM_PTB_ALIGN_MASK(32768 - 1) (RADEON_VM_PTB_ALIGN_SIZE32768 - 1)
903#define RADEON_VM_PTB_ALIGN(a)(((a) + (32768 - 1)) & ~(32768 - 1)) (((a) + RADEON_VM_PTB_ALIGN_MASK(32768 - 1)) & ~RADEON_VM_PTB_ALIGN_MASK(32768 - 1))
904
905#define R600_PTE_VALID(1 << 0) (1 << 0)
906#define R600_PTE_SYSTEM(1 << 1) (1 << 1)
907#define R600_PTE_SNOOPED(1 << 2) (1 << 2)
908#define R600_PTE_READABLE(1 << 5) (1 << 5)
909#define R600_PTE_WRITEABLE(1 << 6) (1 << 6)
910
911/* PTE (Page Table Entry) fragment field for different page sizes */
912#define R600_PTE_FRAG_4KB(0 << 7) (0 << 7)
913#define R600_PTE_FRAG_64KB(4 << 7) (4 << 7)
914#define R600_PTE_FRAG_256KB(6 << 7) (6 << 7)
915
916/* flags needed to be set so we can copy directly from the GART table */
917#define R600_PTE_GART_MASK( (1 << 5) | (1 << 6) | (1 << 1) | (1 << 0) ) ( R600_PTE_READABLE(1 << 5) | R600_PTE_WRITEABLE(1 << 6) | \
918 R600_PTE_SYSTEM(1 << 1) | R600_PTE_VALID(1 << 0) )
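R600_PTE_GART_MASK collects exactly the four PTE flag bits a GART mapping cares about (valid, system, readable, writeable), so a full page-table entry can be masked down to them when copying straight out of the GART table, as the comment above says. A stand-alone illustration using local copies of the flag values (not the driver's own PTE-building code):

#include <stdio.h>
#include <stdint.h>

#define PTE_VALID     (1u << 0)
#define PTE_SYSTEM    (1u << 1)
#define PTE_SNOOPED   (1u << 2)
#define PTE_READABLE  (1u << 5)
#define PTE_WRITEABLE (1u << 6)
#define PTE_GART_MASK (PTE_READABLE | PTE_WRITEABLE | PTE_SYSTEM | PTE_VALID)

int main(void)
{
	/* A system-page PTE with an address and all flags, including snoop. */
	uint64_t pte = 0x12345000ull | PTE_VALID | PTE_SYSTEM | PTE_SNOOPED |
		       PTE_READABLE | PTE_WRITEABLE;
	/* Masking keeps only the four GART-relevant flag bits (0x63 here). */
	printf("flags kept for GART copy: 0x%llx\n",
	       (unsigned long long)(pte & PTE_GART_MASK));
	return 0;
}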
919
920struct radeon_vm_pt {
921 struct radeon_bo *bo;
922 uint64_t addr;
923};
924
925struct radeon_vm_id {
926 unsigned id;
927 uint64_t pd_gpu_addr;
928 /* last flushed PD/PT update */
929 struct radeon_fence *flushed_updates;
930 /* last use of vmid */
931 struct radeon_fence *last_id_use;
932};
933
934struct radeon_vm {
935 struct rwlock mutex;
936
937 struct rb_root_cached va;
938
939 /* protecting invalidated and freed */
940 spinlock_t status_lock;
941
942 /* BOs moved, but not yet updated in the PT */
943 struct list_head invalidated;
944
945 /* BOs freed, but not yet updated in the PT */
946 struct list_head freed;
947
948 /* BOs cleared in the PT */
949 struct list_head cleared;
950
951 /* contains the page directory */
952 struct radeon_bo *page_directory;
953 unsigned max_pde_used;
954
955 /* array of page tables, one for each page directory entry */
956 struct radeon_vm_pt *page_tables;
957
958 struct radeon_bo_va *ib_bo_va;
959
960 /* for id and flush management per ring */
961 struct radeon_vm_id ids[RADEON_NUM_RINGS8];
962};
963
964struct radeon_vm_manager {
965 struct radeon_fence *active[RADEON_NUM_VM16];
966 uint32_t max_pfn;
967 /* number of VMIDs */
968 unsigned nvm;
969 /* vram base address for page table entry */
970 u64 vram_base_offset;
971 /* is vm enabled? */
972 bool_Bool enabled;
973 /* for hw to save the PD addr on suspend/resume */
974 uint32_t saved_table_addr[RADEON_NUM_VM16];
975};
976
977/*
978 * file private structure
979 */
980struct radeon_fpriv {
981 struct radeon_vm vm;
982};
983
984/*
985 * R6xx+ IH ring
986 */
987struct r600_ih {
988 struct radeon_bo *ring_obj;
989 volatile uint32_t *ring;
990 unsigned rptr;
991 unsigned ring_size;
992 uint64_t gpu_addr;
993 uint32_t ptr_mask;
994 atomic_t lock;
995 bool_Bool enabled;
996};
997
998/*
999 * RLC stuff
1000 */
1001#include "clearstate_defs.h"
1002
1003struct radeon_rlc {
1004 /* for power gating */
1005 struct radeon_bo *save_restore_obj;
1006 uint64_t save_restore_gpu_addr;
1007 volatile uint32_t *sr_ptr;
1008 const u32 *reg_list;
1009 u32 reg_list_size;
1010 /* for clear state */
1011 struct radeon_bo *clear_state_obj;
1012 uint64_t clear_state_gpu_addr;
1013 volatile uint32_t *cs_ptr;
1014 const struct cs_section_def *cs_data;
1015 u32 clear_state_size;
1016 /* for cp tables */
1017 struct radeon_bo *cp_table_obj;
1018 uint64_t cp_table_gpu_addr;
1019 volatile uint32_t *cp_table_ptr;
1020 u32 cp_table_size;
1021};
1022
1023int radeon_ib_get(struct radeon_device *rdev, int ring,
1024 struct radeon_ib *ib, struct radeon_vm *vm,
1025 unsigned size);
1026void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
1027int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
1028 struct radeon_ib *const_ib, bool_Bool hdp_flush);
1029int radeon_ib_pool_init(struct radeon_device *rdev);
1030void radeon_ib_pool_fini(struct radeon_device *rdev);
1031int radeon_ib_ring_tests(struct radeon_device *rdev);
1032/* Ring access between begin & end cannot sleep */
1033bool_Bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
1034 struct radeon_ring *ring);
1035void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
1036int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
1037int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
1038void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
1039 bool_Bool hdp_flush);
1040void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
1041 bool_Bool hdp_flush);
1042void radeon_ring_undo(struct radeon_ring *ring);
1043void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
1044int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
1045void radeon_ring_lockup_update(struct radeon_device *rdev,
1046 struct radeon_ring *ring);
1047bool_Bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
1048unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
1049 uint32_t **data);
1050int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
1051 unsigned size, uint32_t *data);
1052int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
1053 unsigned rptr_offs, u32 nop);
1054void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
1055
1056
1057/* r600 async dma */
1058void r600_dma_stop(struct radeon_device *rdev);
1059int r600_dma_resume(struct radeon_device *rdev);
1060void r600_dma_fini(struct radeon_device *rdev);
1061
1062void cayman_dma_stop(struct radeon_device *rdev);
1063int cayman_dma_resume(struct radeon_device *rdev);
1064void cayman_dma_fini(struct radeon_device *rdev);
1065
1066/*
1067 * CS.
1068 */
1069struct radeon_cs_chunk {
1070 uint32_t length_dw;
1071 uint32_t *kdata;
1072 void __user *user_ptr;
1073};
1074
1075struct radeon_cs_parser {
1076 struct device *dev;
1077 struct radeon_device *rdev;
1078 struct drm_file *filp;
1079 /* chunks */
1080 unsigned nchunks;
1081 struct radeon_cs_chunk *chunks;
1082 uint64_t *chunks_array;
1083 /* IB */
1084 unsigned idx;
1085 /* relocations */
1086 unsigned nrelocs;
1087 struct radeon_bo_list *relocs;
1088 struct radeon_bo_list *vm_bos;
1089 struct list_head validated;
1090 unsigned dma_reloc_idx;
1091 /* indices of various chunks */
1092 struct radeon_cs_chunk *chunk_ib;
1093 struct radeon_cs_chunk *chunk_relocs;
1094 struct radeon_cs_chunk *chunk_flags;
1095 struct radeon_cs_chunk *chunk_const_ib;
1096 struct radeon_ib ib;
1097 struct radeon_ib const_ib;
1098 void *track;
1099 unsigned family;
1100 int parser_error;
1101 u32 cs_flags;
1102 u32 ring;
1103 s32 priority;
1104 struct ww_acquire_ctx ticket;
1105};
1106
1107static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
1108{
1109 struct radeon_cs_chunk *ibc = p->chunk_ib;
1110
1111 if (ibc->kdata)
1112 return ibc->kdata[idx];
1113 return p->ib.ptr[idx];
1114}
1115
1116
1117struct radeon_cs_packet {
1118 unsigned idx;
1119 unsigned type;
1120 unsigned reg;
1121 unsigned opcode;
1122 int count;
1123 unsigned one_reg_wr;
1124};
1125
1126typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
1127 struct radeon_cs_packet *pkt,
1128 unsigned idx, unsigned reg);
1129typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
1130 struct radeon_cs_packet *pkt);
1131
1132
1133/*
1134 * AGP
1135 */
1136int radeon_agp_init(struct radeon_device *rdev);
1137void radeon_agp_resume(struct radeon_device *rdev);
1138void radeon_agp_suspend(struct radeon_device *rdev);
1139void radeon_agp_fini(struct radeon_device *rdev);
1140
1141
1142/*
1143 * Writeback
1144 */
1145struct radeon_wb {
1146 struct radeon_bo *wb_obj;
1147 volatile uint32_t *wb;
1148 uint64_t gpu_addr;
1149 bool_Bool enabled;
1150 bool_Bool use_event;
1151};
1152
1153#define RADEON_WB_SCRATCH_OFFSET0 0
1154#define RADEON_WB_RING0_NEXT_RPTR256 256
1155#define RADEON_WB_CP_RPTR_OFFSET1024 1024
1156#define RADEON_WB_CP1_RPTR_OFFSET1280 1280
1157#define RADEON_WB_CP2_RPTR_OFFSET1536 1536
1158#define R600_WB_DMA_RPTR_OFFSET1792 1792
1159#define R600_WB_IH_WPTR_OFFSET2048 2048
1160#define CAYMAN_WB_DMA1_RPTR_OFFSET2304 2304
1161#define R600_WB_EVENT_OFFSET3072 3072
1162#define CIK_WB_CP1_WPTR_OFFSET3328 3328
1163#define CIK_WB_CP2_WPTR_OFFSET3584 3584
1164#define R600_WB_DMA_RING_TEST_OFFSET3588 3588
1165#define CAYMAN_WB_DMA1_RING_TEST_OFFSET3592 3592
1166
1167/**
1168 * struct radeon_pm - power management data
1169 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
1170 * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880)
1171 * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880)
1172 * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880)
1173 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
1174 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
1175 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
1176 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
1177 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
1178 * @sclk: GPU clock Mhz (core bandwidth depends on this clock)
1179 * @needed_bandwidth: current bandwidth needs
1180 *
1181 * It keeps track of various data needed to make power management decisions.
1182 * Bandwidth need is used to determine the minimum clock of the GPU and memory.
1183 * The equation between GPU/memory clock and available bandwidth is hardware dependent
1184 * (type of memory, bus size, efficiency, ...)
1185 */
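The comment above deliberately leaves the clock-to-bandwidth relation open because it is hardware dependent. As a rough illustration only (the numbers and the 0.7 efficiency derating below are assumptions, not values taken from this driver): peak bandwidth for DDR-style memory is roughly the memory clock in MHz, times the bus width in bytes, times two transfers per clock, times an efficiency factor.

#include <stdio.h>

/* Rough peak-bandwidth estimate in MByte/s; the factor of 2 is the DDR
 * transfer rate and 'efficiency' is a hand-waved derating. Illustrative
 * only; the driver derives its real numbers per ASIC. */
static double mem_bandwidth_mbs(double mclk_mhz, unsigned bus_width_bits,
				double efficiency)
{
	return mclk_mhz * (bus_width_bits / 8.0) * 2.0 * efficiency;
}

int main(void)
{
	/* Example: 900 MHz memory clock, 128-bit bus, ~70% efficiency. */
	printf("~%.0f MByte/s\n", mem_bandwidth_mbs(900.0, 128, 0.7));
	return 0;
}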
1186
1187enum radeon_pm_method {
1188 PM_METHOD_PROFILE,
1189 PM_METHOD_DYNPM,
1190 PM_METHOD_DPM,
1191};
1192
1193enum radeon_dynpm_state {
1194 DYNPM_STATE_DISABLED,
1195 DYNPM_STATE_MINIMUM,
1196 DYNPM_STATE_PAUSED,
1197 DYNPM_STATE_ACTIVE,
1198 DYNPM_STATE_SUSPENDED,
1199};
1200enum radeon_dynpm_action {
1201 DYNPM_ACTION_NONE,
1202 DYNPM_ACTION_MINIMUM,
1203 DYNPM_ACTION_DOWNCLOCK,
1204 DYNPM_ACTION_UPCLOCK,
1205 DYNPM_ACTION_DEFAULT
1206};
1207
1208enum radeon_voltage_type {
1209 VOLTAGE_NONE = 0,
1210 VOLTAGE_GPIO,
1211 VOLTAGE_VDDC,
1212 VOLTAGE_SW
1213};
1214
1215enum radeon_pm_state_type {
1216 /* not used for dpm */
1217 POWER_STATE_TYPE_DEFAULT,
1218 POWER_STATE_TYPE_POWERSAVE,
1219 /* user selectable states */
1220 POWER_STATE_TYPE_BATTERY,
1221 POWER_STATE_TYPE_BALANCED,
1222 POWER_STATE_TYPE_PERFORMANCE,
1223 /* internal states */
1224 POWER_STATE_TYPE_INTERNAL_UVD,
1225 POWER_STATE_TYPE_INTERNAL_UVD_SD,
1226 POWER_STATE_TYPE_INTERNAL_UVD_HD,
1227 POWER_STATE_TYPE_INTERNAL_UVD_HD2,
1228 POWER_STATE_TYPE_INTERNAL_UVD_MVC,
1229 POWER_STATE_TYPE_INTERNAL_BOOT,
1230 POWER_STATE_TYPE_INTERNAL_THERMAL,
1231 POWER_STATE_TYPE_INTERNAL_ACPI,
1232 POWER_STATE_TYPE_INTERNAL_ULV,
1233 POWER_STATE_TYPE_INTERNAL_3DPERF,
1234};
1235
1236enum radeon_pm_profile_type {
1237 PM_PROFILE_DEFAULT,
1238 PM_PROFILE_AUTO,
1239 PM_PROFILE_LOW,
1240 PM_PROFILE_MID,
1241 PM_PROFILE_HIGH,
1242};
1243
1244#define PM_PROFILE_DEFAULT_IDX0 0
1245#define PM_PROFILE_LOW_SH_IDX1 1
1246#define PM_PROFILE_MID_SH_IDX2 2
1247#define PM_PROFILE_HIGH_SH_IDX3 3
1248#define PM_PROFILE_LOW_MH_IDX4 4
1249#define PM_PROFILE_MID_MH_IDX5 5
1250#define PM_PROFILE_HIGH_MH_IDX6 6
1251#define PM_PROFILE_MAX7 7
1252
1253struct radeon_pm_profile {
1254 int dpms_off_ps_idx;
1255 int dpms_on_ps_idx;
1256 int dpms_off_cm_idx;
1257 int dpms_on_cm_idx;
1258};
1259
1260enum radeon_int_thermal_type {
1261 THERMAL_TYPE_NONE,
1262 THERMAL_TYPE_EXTERNAL,
1263 THERMAL_TYPE_EXTERNAL_GPIO,
1264 THERMAL_TYPE_RV6XX,
1265 THERMAL_TYPE_RV770,
1266 THERMAL_TYPE_ADT7473_WITH_INTERNAL,
1267 THERMAL_TYPE_EVERGREEN,
1268 THERMAL_TYPE_SUMO,
1269 THERMAL_TYPE_NI,
1270 THERMAL_TYPE_SI,
1271 THERMAL_TYPE_EMC2103_WITH_INTERNAL,
1272 THERMAL_TYPE_CI,
1273 THERMAL_TYPE_KV,
1274};
1275
1276struct radeon_voltage {
1277 enum radeon_voltage_type type;
1278 /* gpio voltage */
1279 struct radeon_gpio_rec gpio;
1280 u32 delay; /* delay in usec from voltage drop to sclk change */
1281 bool_Bool active_high; /* voltage drop is active when bit is high */
1282 /* VDDC voltage */
1283 u8 vddc_id; /* index into vddc voltage table */
1284 u8 vddci_id; /* index into vddci voltage table */
1285 bool_Bool vddci_enabled;
1286 /* r6xx+ sw */
1287 u16 voltage;
1288 /* evergreen+ vddci */
1289 u16 vddci;
1290};
1291
1292/* clock mode flags */
1293#define RADEON_PM_MODE_NO_DISPLAY(1 << 0) (1 << 0)
1294
1295struct radeon_pm_clock_info {
1296 /* memory clock */
1297 u32 mclk;
1298 /* engine clock */
1299 u32 sclk;
1300 /* voltage info */
1301 struct radeon_voltage voltage;
1302 /* standardized clock flags */
1303 u32 flags;
1304};
1305
1306/* state flags */
1307#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY(1 << 0) (1 << 0)
1308
1309struct radeon_power_state {
1310 enum radeon_pm_state_type type;
1311 struct radeon_pm_clock_info *clock_info;
1312 /* number of valid clock modes in this power state */
1313 int num_clock_modes;
1314 struct radeon_pm_clock_info *default_clock_mode;
1315 /* standardized state flags */
1316 u32 flags;
1317 u32 misc; /* vbios specific flags */
1318 u32 misc2; /* vbios specific flags */
1319 int pcie_lanes; /* pcie lanes */
1320};
1321
1322/*
1323 * Some modes are overclocked by a very small value; accept them
1324 */
1325#define RADEON_MODE_OVERCLOCK_MARGIN500 500 /* 5 MHz */
1326
1327enum radeon_dpm_auto_throttle_src {
1328 RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL,
1329 RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL
1330};
1331
1332enum radeon_dpm_event_src {
1333 RADEON_DPM_EVENT_SRC_ANALOG = 0,
1334 RADEON_DPM_EVENT_SRC_EXTERNAL = 1,
1335 RADEON_DPM_EVENT_SRC_DIGITAL = 2,
1336 RADEON_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
1337 RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
1338};
1339
1340#define RADEON_MAX_VCE_LEVELS6 6
1341
1342enum radeon_vce_level {
1343 RADEON_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
1344 RADEON_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
1345 RADEON_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
1346 RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
1347 RADEON_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
1348 RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
1349};
1350
1351struct radeon_ps {
1352 u32 caps; /* vbios flags */
1353 u32 class; /* vbios flags */
1354 u32 class2; /* vbios flags */
1355 /* UVD clocks */
1356 u32 vclk;
1357 u32 dclk;
1358 /* VCE clocks */
1359 u32 evclk;
1360 u32 ecclk;
1361 bool_Bool vce_active;
1362 enum radeon_vce_level vce_level;
1363 /* asic priv */
1364 void *ps_priv;
1365};
1366
1367struct radeon_dpm_thermal {
1368 /* thermal interrupt work */
1369 struct work_struct work;
1370 /* low temperature threshold */
1371 int min_temp;
1372 /* high temperature threshold */
1373 int max_temp;
1374 /* was interrupt low to high or high to low */
1375 bool_Bool high_to_low;
1376};
1377
1378enum radeon_clk_action
1379{
1380 RADEON_SCLK_UP = 1,
1381 RADEON_SCLK_DOWN
1382};
1383
1384struct radeon_blacklist_clocks
1385{
1386 u32 sclk;
1387 u32 mclk;
1388 enum radeon_clk_action action;
1389};
1390
1391struct radeon_clock_and_voltage_limits {
1392 u32 sclk;
1393 u32 mclk;
1394 u16 vddc;
1395 u16 vddci;
1396};
1397
1398struct radeon_clock_array {
1399 u32 count;
1400 u32 *values;
1401};
1402
1403struct radeon_clock_voltage_dependency_entry {
1404 u32 clk;
1405 u16 v;
1406};
1407
1408struct radeon_clock_voltage_dependency_table {
1409 u32 count;
1410 struct radeon_clock_voltage_dependency_entry *entries;
1411};
1412
1413union radeon_cac_leakage_entry {
1414 struct {
1415 u16 vddc;
1416 u32 leakage;
1417 };
1418 struct {
1419 u16 vddc1;
1420 u16 vddc2;
1421 u16 vddc3;
1422 };
1423};
1424
1425struct radeon_cac_leakage_table {
1426 u32 count;
1427 union radeon_cac_leakage_entry *entries;
1428};
1429
1430struct radeon_phase_shedding_limits_entry {
1431 u16 voltage;
1432 u32 sclk;
1433 u32 mclk;
1434};
1435
1436struct radeon_phase_shedding_limits_table {
1437 u32 count;
1438 struct radeon_phase_shedding_limits_entry *entries;
1439};
1440
1441struct radeon_uvd_clock_voltage_dependency_entry {
1442 u32 vclk;
1443 u32 dclk;
1444 u16 v;
1445};
1446
1447struct radeon_uvd_clock_voltage_dependency_table {
1448 u8 count;
1449 struct radeon_uvd_clock_voltage_dependency_entry *entries;
1450};
1451
1452struct radeon_vce_clock_voltage_dependency_entry {
1453 u32 ecclk;
1454 u32 evclk;
1455 u16 v;
1456};
1457
1458struct radeon_vce_clock_voltage_dependency_table {
1459 u8 count;
1460 struct radeon_vce_clock_voltage_dependency_entry *entries;
1461};
1462
1463struct radeon_ppm_table {
1464 u8 ppm_design;
1465 u16 cpu_core_number;
1466 u32 platform_tdp;
1467 u32 small_ac_platform_tdp;
1468 u32 platform_tdc;
1469 u32 small_ac_platform_tdc;
1470 u32 apu_tdp;
1471 u32 dgpu_tdp;
1472 u32 dgpu_ulv_power;
1473 u32 tj_max;
1474};
1475
1476struct radeon_cac_tdp_table {
1477 u16 tdp;
1478 u16 configurable_tdp;
1479 u16 tdc;
1480 u16 battery_power_limit;
1481 u16 small_power_limit;
1482 u16 low_cac_leakage;
1483 u16 high_cac_leakage;
1484 u16 maximum_power_delivery_limit;
1485};
1486
1487struct radeon_dpm_dynamic_state {
1488 struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
1489 struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
1490 struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
1491 struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
1492 struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
1493 struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
1494 struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
1495 struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
1496 struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
1497 struct radeon_clock_array valid_sclk_values;
1498 struct radeon_clock_array valid_mclk_values;
1499 struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
1500 struct radeon_clock_and_voltage_limits max_clock_voltage_on_ac;
1501 u32 mclk_sclk_ratio;
1502 u32 sclk_mclk_delta;
1503 u16 vddc_vddci_delta;
1504 u16 min_vddc_for_pcie_gen2;
1505 struct radeon_cac_leakage_table cac_leakage_table;
1506 struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
1507 struct radeon_ppm_table *ppm_table;
1508 struct radeon_cac_tdp_table *cac_tdp_table;
1509};
1510
1511struct radeon_dpm_fan {
1512 u16 t_min;
1513 u16 t_med;
1514 u16 t_high;
1515 u16 pwm_min;
1516 u16 pwm_med;
1517 u16 pwm_high;
1518 u8 t_hyst;
1519 u32 cycle_delay;
1520 u16 t_max;
1521 u8 control_mode;
1522 u16 default_max_fan_pwm;
1523 u16 default_fan_output_sensitivity;
1524 u16 fan_output_sensitivity;
1525 bool_Bool ucode_fan_control;
1526};
1527
1528enum radeon_pcie_gen {
1529 RADEON_PCIE_GEN1 = 0,
1530 RADEON_PCIE_GEN2 = 1,
1531 RADEON_PCIE_GEN3 = 2,
1532 RADEON_PCIE_GEN_INVALID = 0xffff
1533};
1534
1535enum radeon_dpm_forced_level {
1536 RADEON_DPM_FORCED_LEVEL_AUTO = 0,
1537 RADEON_DPM_FORCED_LEVEL_LOW = 1,
1538 RADEON_DPM_FORCED_LEVEL_HIGH = 2,
1539};
1540
1541struct radeon_vce_state {
1542 /* vce clocks */
1543 u32 evclk;
1544 u32 ecclk;
1545 /* gpu clocks */
1546 u32 sclk;
1547 u32 mclk;
1548 u8 clk_idx;
1549 u8 pstate;
1550};
1551
1552struct radeon_dpm {
1553 struct radeon_ps *ps;
1554 /* number of valid power states */
1555 int num_ps;
1556 /* current power state that is active */
1557 struct radeon_ps *current_ps;
1558 /* requested power state */
1559 struct radeon_ps *requested_ps;
1560 /* boot up power state */
1561 struct radeon_ps *boot_ps;
1562 /* default uvd power state */
1563 struct radeon_ps *uvd_ps;
1564 /* vce requirements */
1565 struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS6];
1566 enum radeon_vce_level vce_level;
1567 enum radeon_pm_state_type state;
1568 enum radeon_pm_state_type user_state;
1569 u32 platform_caps;
1570 u32 voltage_response_time;
1571 u32 backbias_response_time;
1572 void *priv;
1573 u32 new_active_crtcs;
1574 int new_active_crtc_count;
1575 int high_pixelclock_count;
1576 u32 current_active_crtcs;
1577 int current_active_crtc_count;
1578 bool_Bool single_display;
1579 struct radeon_dpm_dynamic_state dyn_state;
1580 struct radeon_dpm_fan fan;
1581 u32 tdp_limit;
1582 u32 near_tdp_limit;
1583 u32 near_tdp_limit_adjusted;
1584 u32 sq_ramping_threshold;
1585 u32 cac_leakage;
1586 u16 tdp_od_limit;
1587 u32 tdp_adjustment;
1588 u16 load_line_slope;
1589 bool_Bool power_control;
1590 bool_Bool ac_power;
1591 /* special states active */
1592 bool_Bool thermal_active;
1593 bool_Bool uvd_active;
1594 bool_Bool vce_active;
1595 /* thermal handling */
1596 struct radeon_dpm_thermal thermal;
1597 /* forced levels */
1598 enum radeon_dpm_forced_level forced_level;
1599 /* track UVD streams */
1600 unsigned sd;
1601 unsigned hd;
1602};
1603
1604void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool_Bool enable);
1605void radeon_dpm_enable_vce(struct radeon_device *rdev, bool_Bool enable);
1606
1607struct radeon_pm {
1608 struct rwlock mutex;
1609 /* write locked while reprogramming mclk */
1610 struct rwlock mclk_lock;
1611 u32 active_crtcs;
1612 int active_crtc_count;
1613 int req_vblank;
1614 bool_Bool vblank_sync;
1615 fixed20_12 max_bandwidth;
1616 fixed20_12 igp_sideport_mclk;
1617 fixed20_12 igp_system_mclk;
1618 fixed20_12 igp_ht_link_clk;
1619 fixed20_12 igp_ht_link_width;
1620 fixed20_12 k8_bandwidth;
1621 fixed20_12 sideport_bandwidth;
1622 fixed20_12 ht_bandwidth;
1623 fixed20_12 core_bandwidth;
1624 fixed20_12 sclk;
1625 fixed20_12 mclk;
1626 fixed20_12 needed_bandwidth;
1627 struct radeon_power_state *power_state;
1628 /* number of valid power states */
1629 int num_power_states;
1630 int current_power_state_index;
1631 int current_clock_mode_index;
1632 int requested_power_state_index;
1633 int requested_clock_mode_index;
1634 int default_power_state_index;
1635 u32 current_sclk;
1636 u32 current_mclk;
1637 u16 current_vddc;
1638 u16 current_vddci;
1639 u32 default_sclk;
1640 u32 default_mclk;
1641 u16 default_vddc;
1642 u16 default_vddci;
1643 struct radeon_i2c_chan *i2c_bus;
1644 /* selected pm method */
1645 enum radeon_pm_method pm_method;
1646 /* dynpm power management */
1647 struct delayed_work dynpm_idle_work;
1648 enum radeon_dynpm_state dynpm_state;
1649 enum radeon_dynpm_action dynpm_planned_action;
1650 unsigned long dynpm_action_timeout;
1651 bool_Bool dynpm_can_upclock;
1652 bool_Bool dynpm_can_downclock;
1653 /* profile-based power management */
1654 enum radeon_pm_profile_type profile;
1655 int profile_index;
1656 struct radeon_pm_profile profiles[PM_PROFILE_MAX7];
1657 /* internal thermal controller on rv6xx+ */
1658 enum radeon_int_thermal_type int_thermal_type;
1659 struct device *int_hwmon_dev;
1660 /* fan control parameters */
1661 bool_Bool no_fan;
1662 u8 fan_pulses_per_revolution;
1663 u8 fan_min_rpm;
1664 u8 fan_max_rpm;
1665 /* dpm */
1666 bool_Bool dpm_enabled;
1667 bool_Bool sysfs_initialized;
1668 struct radeon_dpm dpm;
1669};
1670
1671#define RADEON_PCIE_SPEED_251 1
1672#define RADEON_PCIE_SPEED_502 2
1673#define RADEON_PCIE_SPEED_804 4
1674
1675int radeon_pm_get_type_index(struct radeon_device *rdev,
1676 enum radeon_pm_state_type ps_type,
1677 int instance);
1678/*
1679 * UVD
1680 */
1681#define RADEON_DEFAULT_UVD_HANDLES10 10
1682#define RADEON_MAX_UVD_HANDLES30 30
1683#define RADEON_UVD_STACK_SIZE(200*1024) (200*1024)
1684#define RADEON_UVD_HEAP_SIZE(256*1024) (256*1024)
1685#define RADEON_UVD_SESSION_SIZE(50*1024) (50*1024)
1686
1687struct radeon_uvd {
1688 bool_Bool fw_header_present;
1689 struct radeon_bo *vcpu_bo;
1690 void *cpu_addr;
1691 uint64_t gpu_addr;
1692 unsigned max_handles;
1693 atomic_t handles[RADEON_MAX_UVD_HANDLES30];
1694 struct drm_file *filp[RADEON_MAX_UVD_HANDLES30];
1695 unsigned img_size[RADEON_MAX_UVD_HANDLES30];
1696 struct delayed_work idle_work;
1697};
1698
1699int radeon_uvd_init(struct radeon_device *rdev);
1700void radeon_uvd_fini(struct radeon_device *rdev);
1701int radeon_uvd_suspend(struct radeon_device *rdev);
1702int radeon_uvd_resume(struct radeon_device *rdev);
1703int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
1704 uint32_t handle, struct radeon_fence **fence);
1705int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
1706 uint32_t handle, struct radeon_fence **fence);
1707void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
1708 uint32_t allowed_domains);
1709void radeon_uvd_free_handles(struct radeon_device *rdev,
1710 struct drm_file *filp);
1711int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
1712void radeon_uvd_note_usage(struct radeon_device *rdev);
1713int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
1714 unsigned vclk, unsigned dclk,
1715 unsigned vco_min, unsigned vco_max,
1716 unsigned fb_factor, unsigned fb_mask,
1717 unsigned pd_min, unsigned pd_max,
1718 unsigned pd_even,
1719 unsigned *optimal_fb_div,
1720 unsigned *optimal_vclk_div,
1721 unsigned *optimal_dclk_div);
1722int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
1723 unsigned cg_upll_func_cntl);
1724
1725/*
1726 * VCE
1727 */
1728#define RADEON_MAX_VCE_HANDLES16 16
1729
1730struct radeon_vce {
1731 struct radeon_bo *vcpu_bo;
1732 uint64_t gpu_addr;
1733 unsigned fw_version;
1734 unsigned fb_version;
1735 atomic_t handles[RADEON_MAX_VCE_HANDLES16];
1736 struct drm_file *filp[RADEON_MAX_VCE_HANDLES16];
1737 unsigned img_size[RADEON_MAX_VCE_HANDLES16];
1738 struct delayed_work idle_work;
1739 uint32_t keyselect;
1740};
1741
1742int radeon_vce_init(struct radeon_device *rdev);
1743void radeon_vce_fini(struct radeon_device *rdev);
1744int radeon_vce_suspend(struct radeon_device *rdev);
1745int radeon_vce_resume(struct radeon_device *rdev);
1746int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
1747 uint32_t handle, struct radeon_fence **fence);
1748int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
1749 uint32_t handle, struct radeon_fence **fence);
1750void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
1751void radeon_vce_note_usage(struct radeon_device *rdev);
1752int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
1753int radeon_vce_cs_parse(struct radeon_cs_parser *p);
1754bool_Bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
1755 struct radeon_ring *ring,
1756 struct radeon_semaphore *semaphore,
1757 bool_Bool emit_wait);
1758void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
1759void radeon_vce_fence_emit(struct radeon_device *rdev,
1760 struct radeon_fence *fence);
1761int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
1762int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
1763
1764struct r600_audio_pin {
1765 int channels;
1766 int rate;
1767 int bits_per_sample;
1768 u8 status_bits;
1769 u8 category_code;
1770 u32 offset;
1771 bool_Bool connected;
1772 u32 id;
1773};
1774
1775struct r600_audio {
1776 bool_Bool enabled;
1777 struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS7];
1778 int num_pins;
1779 struct radeon_audio_funcs *hdmi_funcs;
1780 struct radeon_audio_funcs *dp_funcs;
1781 struct radeon_audio_basic_funcs *funcs;
1782};
1783
1784/*
1785 * Benchmarking
1786 */
1787void radeon_benchmark(struct radeon_device *rdev, int test_number);
1788
1789
1790/*
1791 * Testing
1792 */
1793void radeon_test_moves(struct radeon_device *rdev);
1794void radeon_test_ring_sync(struct radeon_device *rdev,
1795 struct radeon_ring *cpA,
1796 struct radeon_ring *cpB);
1797void radeon_test_syncing(struct radeon_device *rdev);
1798
1799/*
1800 * MMU Notifier
1801 */
1802#if defined(CONFIG_MMU_NOTIFIER)
1803int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
1804void radeon_mn_unregister(struct radeon_bo *bo);
1805#else
1806static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
1807{
1808 return -ENODEV19;
1809}
1810static inline void radeon_mn_unregister(struct radeon_bo *bo) {}
1811#endif
1812
1813/*
1814 * Debugfs
1815 */
1816struct radeon_debugfs {
1817 struct drm_info_list *files;
1818 unsigned num_files;
1819};
1820
1821int radeon_debugfs_add_files(struct radeon_device *rdev,
1822 struct drm_info_list *files,
1823 unsigned nfiles);
1824int radeon_debugfs_fence_init(struct radeon_device *rdev);
1825
1826/*
1827 * ASIC ring specific functions.
1828 */
1829struct radeon_asic_ring {
1830 /* ring read/write ptr handling */
1831 u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1832 u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1833 void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1834
1835 /* validating and patching of IBs */
1836 int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
1837 int (*cs_parse)(struct radeon_cs_parser *p);
1838
1839 /* command emit functions */
1840 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
1841 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
1842 void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
1843 bool_Bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
1844 struct radeon_semaphore *semaphore, bool_Bool emit_wait);
1845 void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
1846 unsigned vm_id, uint64_t pd_addr);
1847
1848 /* testing functions */
1849 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1850 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1851 bool_Bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1852
1853 /* deprecated */
1854 void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
1855};
1856
1857/*
1858 * ASIC specific functions.
1859 */
1860struct radeon_asic {
1861 int (*init)(struct radeon_device *rdev);
1862 void (*fini)(struct radeon_device *rdev);
1863 int (*resume)(struct radeon_device *rdev);
1864 int (*suspend)(struct radeon_device *rdev);
1865 void (*vga_set_state)(struct radeon_device *rdev, bool_Bool state);
1866 int (*asic_reset)(struct radeon_device *rdev, bool_Bool hard);
1867 /* Flush the HDP cache via MMIO */
1868 void (*mmio_hdp_flush)(struct radeon_device *rdev);
1869 /* check if 3D engine is idle */
1870 bool_Bool (*gui_idle)(struct radeon_device *rdev);
1871 /* wait for mc_idle */
1872 int (*mc_wait_for_idle)(struct radeon_device *rdev);
1873 /* get the reference clock */
1874 u32 (*get_xclk)(struct radeon_device *rdev);
1875 /* get the gpu clock counter */
1876 uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
1877 /* get register for info ioctl */
1878 int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val);
1879 /* gart */
1880 struct {
1881 void (*tlb_flush)(struct radeon_device *rdev);
1882 uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
1883 void (*set_page)(struct radeon_device *rdev, unsigned i,
1884 uint64_t entry);
1885 } gart;
1886 struct {
1887 int (*init)(struct radeon_device *rdev);
1888 void (*fini)(struct radeon_device *rdev);
1889 void (*copy_pages)(struct radeon_device *rdev,
1890 struct radeon_ib *ib,
1891 uint64_t pe, uint64_t src,
1892 unsigned count);
1893 void (*write_pages)(struct radeon_device *rdev,
1894 struct radeon_ib *ib,
1895 uint64_t pe,
1896 uint64_t addr, unsigned count,
1897 uint32_t incr, uint32_t flags);
1898 void (*set_pages)(struct radeon_device *rdev,
1899 struct radeon_ib *ib,
1900 uint64_t pe,
1901 uint64_t addr, unsigned count,
1902 uint32_t incr, uint32_t flags);
1903 void (*pad_ib)(struct radeon_ib *ib);
1904 } vm;
1905 /* ring specific callbacks */
1906 const struct radeon_asic_ring *ring[RADEON_NUM_RINGS8];
1907 /* irqs */
1908 struct {
1909 int (*set)(struct radeon_device *rdev);
1910 int (*process)(struct radeon_device *rdev);
1911 } irq;
1912 /* displays */
1913 struct {
1914 /* display watermarks */
1915 void (*bandwidth_update)(struct radeon_device *rdev);
1916 /* get frame count */
1917 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
1918 /* wait for vblank */
1919 void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
1920 /* set backlight level */
1921 void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
1922 /* get backlight level */
1923 u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
1924 /* audio callbacks */
1925 void (*hdmi_enable)(struct drm_encoder *encoder, bool_Bool enable);
1926 void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
1927 } display;
1928 /* copy functions for bo handling */
1929 struct {
1930 struct radeon_fence *(*blit)(struct radeon_device *rdev,
1931 uint64_t src_offset,
1932 uint64_t dst_offset,
1933 unsigned num_gpu_pages,
1934 struct dma_resv *resv);
1935 u32 blit_ring_index;
1936 struct radeon_fence *(*dma)(struct radeon_device *rdev,
1937 uint64_t src_offset,
1938 uint64_t dst_offset,
1939 unsigned num_gpu_pages,
1940 struct dma_resv *resv);
1941 u32 dma_ring_index;
1942 /* method used for bo copy */
1943 struct radeon_fence *(*copy)(struct radeon_device *rdev,
1944 uint64_t src_offset,
1945 uint64_t dst_offset,
1946 unsigned num_gpu_pages,
1947 struct dma_resv *resv);
1948 /* ring used for bo copies */
1949 u32 copy_ring_index;
1950 } copy;
1951 /* surfaces */
1952 struct {
1953 int (*set_reg)(struct radeon_device *rdev, int reg,
1954 uint32_t tiling_flags, uint32_t pitch,
1955 uint32_t offset, uint32_t obj_size);
1956 void (*clear_reg)(struct radeon_device *rdev, int reg);
1957 } surface;
1958 /* hotplug detect */
1959 struct {
1960 void (*init)(struct radeon_device *rdev);
1961 void (*fini)(struct radeon_device *rdev);
1962 bool_Bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1963 void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1964 } hpd;
1965 /* static power management */
1966 struct {
1967 void (*misc)(struct radeon_device *rdev);
1968 void (*prepare)(struct radeon_device *rdev);
1969 void (*finish)(struct radeon_device *rdev);
1970 void (*init_profile)(struct radeon_device *rdev);
1971 void (*get_dynpm_state)(struct radeon_device *rdev);
1972 uint32_t (*get_engine_clock)(struct radeon_device *rdev);
1973 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
1974 uint32_t (*get_memory_clock)(struct radeon_device *rdev);
1975 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
1976 int (*get_pcie_lanes)(struct radeon_device *rdev);
1977 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
1978 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
1979 int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
1980 int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk);
1981 int (*get_temperature)(struct radeon_device *rdev);
1982 } pm;
1983 /* dynamic power management */
1984 struct {
1985 int (*init)(struct radeon_device *rdev);
1986 void (*setup_asic)(struct radeon_device *rdev);
1987 int (*enable)(struct radeon_device *rdev);
1988 int (*late_enable)(struct radeon_device *rdev);
1989 void (*disable)(struct radeon_device *rdev);
1990 int (*pre_set_power_state)(struct radeon_device *rdev);
1991 int (*set_power_state)(struct radeon_device *rdev);
1992 void (*post_set_power_state)(struct radeon_device *rdev);
1993 void (*display_configuration_changed)(struct radeon_device *rdev);
1994 void (*fini)(struct radeon_device *rdev);
1995 u32 (*get_sclk)(struct radeon_device *rdev, bool_Bool low);
1996 u32 (*get_mclk)(struct radeon_device *rdev, bool_Bool low);
1997 void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
1998 void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
1999 int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
2000 bool_Bool (*vblank_too_short)(struct radeon_device *rdev);
2001 void (*powergate_uvd)(struct radeon_device *rdev, bool_Bool gate);
2002 void (*enable_bapm)(struct radeon_device *rdev, bool_Bool enable);
2003 void (*fan_ctrl_set_mode)(struct radeon_device *rdev, u32 mode);
2004 u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev);
2005 int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed);
2006 int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed);
2007 u32 (*get_current_sclk)(struct radeon_device *rdev);
2008 u32 (*get_current_mclk)(struct radeon_device *rdev);
2009 } dpm;
2010 /* pageflipping */
2011 struct {
2012 void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base, bool_Bool async);
2013 bool_Bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
2014 } pflip;
2015};
2016
2017/*
2018 * Asic structures
2019 */
2020struct r100_asic {
2021 const unsigned *reg_safe_bm;
2022 unsigned reg_safe_bm_size;
2023 u32 hdp_cntl;
2024};
2025
2026struct r300_asic {
2027 const unsigned *reg_safe_bm;
2028 unsigned reg_safe_bm_size;
2029 u32 resync_scratch;
2030 u32 hdp_cntl;
2031};
2032
2033struct r600_asic {
2034 unsigned max_pipes;
2035 unsigned max_tile_pipes;
2036 unsigned max_simds;
2037 unsigned max_backends;
2038 unsigned max_gprs;
2039 unsigned max_threads;
2040 unsigned max_stack_entries;
2041 unsigned max_hw_contexts;
2042 unsigned max_gs_threads;
2043 unsigned sx_max_export_size;
2044 unsigned sx_max_export_pos_size;
2045 unsigned sx_max_export_smx_size;
2046 unsigned sq_num_cf_insts;
2047 unsigned tiling_nbanks;
2048 unsigned tiling_npipes;
2049 unsigned tiling_group_size;
2050 unsigned tile_config;
2051 unsigned backend_map;
2052 unsigned active_simds;
2053};
2054
2055struct rv770_asic {
2056 unsigned max_pipes;
2057 unsigned max_tile_pipes;
2058 unsigned max_simds;
2059 unsigned max_backends;
2060 unsigned max_gprs;
2061 unsigned max_threads;
2062 unsigned max_stack_entries;
2063 unsigned max_hw_contexts;
2064 unsigned max_gs_threads;
2065 unsigned sx_max_export_size;
2066 unsigned sx_max_export_pos_size;
2067 unsigned sx_max_export_smx_size;
2068 unsigned sq_num_cf_insts;
2069 unsigned sx_num_of_sets;
2070 unsigned sc_prim_fifo_size;
2071 unsigned sc_hiz_tile_fifo_size;
2072 unsigned sc_earlyz_tile_fifo_fize;
2073 unsigned tiling_nbanks;
2074 unsigned tiling_npipes;
2075 unsigned tiling_group_size;
2076 unsigned tile_config;
2077 unsigned backend_map;
2078 unsigned active_simds;
2079};
2080
2081struct evergreen_asic {
2082 unsigned num_ses;
2083 unsigned max_pipes;
2084 unsigned max_tile_pipes;
2085 unsigned max_simds;
2086 unsigned max_backends;
2087 unsigned max_gprs;
2088 unsigned max_threads;
2089 unsigned max_stack_entries;
2090 unsigned max_hw_contexts;
2091 unsigned max_gs_threads;
2092 unsigned sx_max_export_size;
2093 unsigned sx_max_export_pos_size;
2094 unsigned sx_max_export_smx_size;
2095 unsigned sq_num_cf_insts;
2096 unsigned sx_num_of_sets;
2097 unsigned sc_prim_fifo_size;
2098 unsigned sc_hiz_tile_fifo_size;
2099 unsigned sc_earlyz_tile_fifo_size;
2100 unsigned tiling_nbanks;
2101 unsigned tiling_npipes;
2102 unsigned tiling_group_size;
2103 unsigned tile_config;
2104 unsigned backend_map;
2105 unsigned active_simds;
2106};
2107
2108struct cayman_asic {
2109 unsigned max_shader_engines;
2110 unsigned max_pipes_per_simd;
2111 unsigned max_tile_pipes;
2112 unsigned max_simds_per_se;
2113 unsigned max_backends_per_se;
2114 unsigned max_texture_channel_caches;
2115 unsigned max_gprs;
2116 unsigned max_threads;
2117 unsigned max_gs_threads;
2118 unsigned max_stack_entries;
2119 unsigned sx_num_of_sets;
2120 unsigned sx_max_export_size;
2121 unsigned sx_max_export_pos_size;
2122 unsigned sx_max_export_smx_size;
2123 unsigned max_hw_contexts;
2124 unsigned sq_num_cf_insts;
2125 unsigned sc_prim_fifo_size;
2126 unsigned sc_hiz_tile_fifo_size;
2127 unsigned sc_earlyz_tile_fifo_size;
2128
2129 unsigned num_shader_engines;
2130 unsigned num_shader_pipes_per_simd;
2131 unsigned num_tile_pipes;
2132 unsigned num_simds_per_se;
2133 unsigned num_backends_per_se;
2134 unsigned backend_disable_mask_per_asic;
2135 unsigned backend_map;
2136 unsigned num_texture_channel_caches;
2137 unsigned mem_max_burst_length_bytes;
2138 unsigned mem_row_size_in_kb;
2139 unsigned shader_engine_tile_size;
2140 unsigned num_gpus;
2141 unsigned multi_gpu_tile_size;
2142
2143 unsigned tile_config;
2144 unsigned active_simds;
2145};
2146
2147struct si_asic {
2148 unsigned max_shader_engines;
2149 unsigned max_tile_pipes;
2150 unsigned max_cu_per_sh;
2151 unsigned max_sh_per_se;
2152 unsigned max_backends_per_se;
2153 unsigned max_texture_channel_caches;
2154 unsigned max_gprs;
2155 unsigned max_gs_threads;
2156 unsigned max_hw_contexts;
2157 unsigned sc_prim_fifo_size_frontend;
2158 unsigned sc_prim_fifo_size_backend;
2159 unsigned sc_hiz_tile_fifo_size;
2160 unsigned sc_earlyz_tile_fifo_size;
2161
2162 unsigned num_tile_pipes;
2163 unsigned backend_enable_mask;
2164 unsigned backend_disable_mask_per_asic;
2165 unsigned backend_map;
2166 unsigned num_texture_channel_caches;
2167 unsigned mem_max_burst_length_bytes;
2168 unsigned mem_row_size_in_kb;
2169 unsigned shader_engine_tile_size;
2170 unsigned num_gpus;
2171 unsigned multi_gpu_tile_size;
2172
2173 unsigned tile_config;
2174 uint32_t tile_mode_array[32];
2175 uint32_t active_cus;
2176};
2177
2178struct cik_asic {
2179 unsigned max_shader_engines;
2180 unsigned max_tile_pipes;
2181 unsigned max_cu_per_sh;
2182 unsigned max_sh_per_se;
2183 unsigned max_backends_per_se;
2184 unsigned max_texture_channel_caches;
2185 unsigned max_gprs;
2186 unsigned max_gs_threads;
2187 unsigned max_hw_contexts;
2188 unsigned sc_prim_fifo_size_frontend;
2189 unsigned sc_prim_fifo_size_backend;
2190 unsigned sc_hiz_tile_fifo_size;
2191 unsigned sc_earlyz_tile_fifo_size;
2192
2193 unsigned num_tile_pipes;
2194 unsigned backend_enable_mask;
2195 unsigned backend_disable_mask_per_asic;
2196 unsigned backend_map;
2197 unsigned num_texture_channel_caches;
2198 unsigned mem_max_burst_length_bytes;
2199 unsigned mem_row_size_in_kb;
2200 unsigned shader_engine_tile_size;
2201 unsigned num_gpus;
2202 unsigned multi_gpu_tile_size;
2203
2204 unsigned tile_config;
2205 uint32_t tile_mode_array[32];
2206 uint32_t macrotile_mode_array[16];
2207 uint32_t active_cus;
2208};
2209
2210union radeon_asic_config {
2211 struct r300_asic r300;
2212 struct r100_asic r100;
2213 struct r600_asic r600;
2214 struct rv770_asic rv770;
2215 struct evergreen_asic evergreen;
2216 struct cayman_asic cayman;
2217 struct si_asic si;
2218 struct cik_asic cik;
2219};
2220
2221/*
2222 * ASIC initialization from radeon_asic.c
2223 */
2224void radeon_agp_disable(struct radeon_device *rdev);
2225int radeon_asic_init(struct radeon_device *rdev);
2226
2227
2228/*
2229 * IOCTL.
2230 */
2231int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
2232 struct drm_file *filp);
2233int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
2234 struct drm_file *filp);
2235int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
2236 struct drm_file *filp);
2237int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
2238 struct drm_file *file_priv);
2239int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
2240 struct drm_file *file_priv);
2241int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
2242 struct drm_file *file_priv);
2243int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
2244 struct drm_file *file_priv);
2245int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
2246 struct drm_file *filp);
2247int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
2248 struct drm_file *filp);
2249int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
2250 struct drm_file *filp);
2251int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
2252 struct drm_file *filp);
2253int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
2254 struct drm_file *filp);
2255int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
2256 struct drm_file *filp);
2257int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
2258int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
2259 struct drm_file *filp);
2260int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
2261 struct drm_file *filp);
2262
2263/* VRAM scratch page for HDP bug, default vram page */
2264struct r600_vram_scratch {
2265 struct radeon_bo *robj;
2266 volatile uint32_t *ptr;
2267 u64 gpu_addr;
2268};
2269
2270/*
2271 * ACPI
2272 */
2273struct radeon_atif_notification_cfg {
2274 bool_Bool enabled;
2275 int command_code;
2276};
2277
2278struct radeon_atif_notifications {
2279 bool_Bool display_switch;
2280 bool_Bool expansion_mode_change;
2281 bool_Bool thermal_state;
2282 bool_Bool forced_power_state;
2283 bool_Bool system_power_state;
2284 bool_Bool display_conf_change;
2285 bool_Bool px_gfx_switch;
2286 bool_Bool brightness_change;
2287 bool_Bool dgpu_display_event;
2288};
2289
2290struct radeon_atif_functions {
2291 bool_Bool system_params;
2292 bool_Bool sbios_requests;
2293 bool_Bool select_active_disp;
2294 bool_Bool lid_state;
2295 bool_Bool get_tv_standard;
2296 bool_Bool set_tv_standard;
2297 bool_Bool get_panel_expansion_mode;
2298 bool_Bool set_panel_expansion_mode;
2299 bool_Bool temperature_change;
2300 bool_Bool graphics_device_types;
2301};
2302
2303struct radeon_atif {
2304 struct radeon_atif_notifications notifications;
2305 struct radeon_atif_functions functions;
2306 struct radeon_atif_notification_cfg notification_cfg;
2307 struct radeon_encoder *encoder_for_bl;
2308};
2309
2310struct radeon_atcs_functions {
2311 bool_Bool get_ext_state;
2312 bool_Bool pcie_perf_req;
2313 bool_Bool pcie_dev_rdy;
2314 bool_Bool pcie_bus_width;
2315};
2316
2317struct radeon_atcs {
2318 struct radeon_atcs_functions functions;
2319};
2320
2321/*
2322 * Core structure, functions and helpers.
2323 */
2324typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
2325typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
2326
2327struct radeon_device {
2328 struct device self;
2329 struct device *dev;
2330 struct drm_device *ddev;
2331 struct pci_dev *pdev;
2332 struct rwlock exclusive_lock;
2333
2334 pci_chipset_tag_t pc;
2335 pcitag_t pa_tag;
2336 pci_intr_handle_t intrh;
2337 bus_space_tag_t iot;
2338 bus_space_tag_t memt;
2339 bus_dma_tag_t dmat;
2340 void *irqh;
2341
2342 void (*switchcb)(void *, int, int);
2343 void *switchcbarg;
2344 void *switchcookie;
2345 struct task switchtask;
2346 struct rasops_info ro;
2347 int console;
2348 int primary;
2349
2350 struct task burner_task;
2351 int burner_fblank;
2352
2353#ifdef __sparc64__
2354 struct sunfb sf;
2355 bus_size_t fb_offset;
2356 bus_space_handle_t memh;
2357#endif
2358
2359 unsigned long fb_aper_offset;
2360 unsigned long fb_aper_size;
2361
2362 /* ASIC */
2363 union radeon_asic_config config;
2364 enum radeon_family family;
2365 unsigned long flags;
2366 int usec_timeout;
2367 enum radeon_pll_errata pll_errata;
2368 int num_gb_pipes;
2369 int num_z_pipes;
2370 int disp_priority;
2371 /* BIOS */
2372 uint8_t *bios;
2373 bool_Bool is_atom_bios;
2374 uint16_t bios_header_start;
2375 struct radeon_bo *stolen_vga_memory;
2376 /* Register mmio */
2377 resource_size_t rmmio_base;
2378 resource_size_t rmmio_size;
2379 /* protects concurrent MM_INDEX/DATA based register access */
2380 spinlock_t mmio_idx_lock;
2381 /* protects concurrent SMC based register access */
2382 spinlock_t smc_idx_lock;
2383 /* protects concurrent PLL register access */
2384 spinlock_t pll_idx_lock;
2385 /* protects concurrent MC register access */
2386 spinlock_t mc_idx_lock;
2387 /* protects concurrent PCIE register access */
2388 spinlock_t pcie_idx_lock;
2389 /* protects concurrent PCIE_PORT register access */
2390 spinlock_t pciep_idx_lock;
2391 /* protects concurrent PIF register access */
2392 spinlock_t pif_idx_lock;
2393 /* protects concurrent CG register access */
2394 spinlock_t cg_idx_lock;
2395 /* protects concurrent UVD register access */
2396 spinlock_t uvd_idx_lock;
2397 /* protects concurrent RCU register access */
2398 spinlock_t rcu_idx_lock;
2399 /* protects concurrent DIDT register access */
2400 spinlock_t didt_idx_lock;
2401 /* protects concurrent ENDPOINT (audio) register access */
2402 spinlock_t end_idx_lock;
2403 bus_space_handle_t rmmio_bsh;
2404 void __iomem *rmmio;
2405 radeon_rreg_t mc_rreg;
2406 radeon_wreg_t mc_wreg;
2407 radeon_rreg_t pll_rreg;
2408 radeon_wreg_t pll_wreg;
2409 uint32_t pcie_reg_mask;
2410 radeon_rreg_t pciep_rreg;
2411 radeon_wreg_t pciep_wreg;
2412 /* io port */
2413 bus_space_handle_t rio_mem;
2414 resource_size_t rio_mem_size;
2415 struct radeon_clock clock;
2416 struct radeon_mc mc;
2417 struct radeon_gart gart;
2418 struct radeon_mode_info mode_info;
2419 struct radeon_scratch scratch;
2420 struct radeon_doorbell doorbell;
2421 struct radeon_mman mman;
2422 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS8];
2423 wait_queue_head_t fence_queue;
2424 u64 fence_context;
2425 struct rwlock ring_lock;
2426 struct radeon_ring ring[RADEON_NUM_RINGS8];
2427 bool_Bool ib_pool_ready;
2428 struct radeon_sa_manager ring_tmp_bo;
2429 struct radeon_irq irq;
2430 struct radeon_asic *asic;
2431 struct radeon_gem gem;
2432 struct radeon_pm pm;
2433 struct radeon_uvd uvd;
2434 struct radeon_vce vce;
2435 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH8];
2436 struct radeon_wb wb;
2437 struct radeon_dummy_page dummy_page;
2438 bool_Bool shutdown;
2439 bool_Bool need_swiotlb;
2440 bool_Bool accel_working;
2441 bool_Bool fastfb_working; /* IGP feature*/
2442 bool_Bool needs_reset, in_reset;
2443 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES8];
2444 const struct firmware *me_fw; /* all family ME firmware */
2445 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
2446 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
2447 const struct firmware *mc_fw; /* NI MC firmware */
2448 const struct firmware *ce_fw; /* SI CE firmware */
2449 const struct firmware *mec_fw; /* CIK MEC firmware */
2450 const struct firmware *mec2_fw; /* KV MEC2 firmware */
2451 const struct firmware *sdma_fw; /* CIK SDMA firmware */
2452 const struct firmware *smc_fw; /* SMC firmware */
2453 const struct firmware *uvd_fw; /* UVD firmware */
2454 const struct firmware *vce_fw; /* VCE firmware */
2455 bool_Bool new_fw;
2456 struct r600_vram_scratch vram_scratch;
2457 int msi_enabled; /* msi enabled */
2458 struct r600_ih ih; /* r6/700 interrupt ring */
2459 struct radeon_rlc rlc;
2460 struct radeon_mec mec;
2461 struct delayed_work hotplug_work;
2462 struct work_struct dp_work;
2463 struct work_struct audio_work;
2464 int num_crtc; /* number of crtcs */
2465 struct rwlock dc_hw_i2c_mutex; /* display controller hw i2c mutex */
2466 bool has_uvd;
2467 bool has_vce;
2468 struct r600_audio audio; /* audio stuff */
2469 struct notifier_block acpi_nb;
2470 /* only one userspace can use Hyperz features or CMASK at a time */
2471 struct drm_file *hyperz_filp;
2472 struct drm_file *cmask_filp;
2473 /* i2c buses */
2474 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
2475 /* debugfs */
2476 struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
2477 unsigned debugfs_count;
2478 /* virtual memory */
2479 struct radeon_vm_manager vm_manager;
2480 struct rwlock gpu_clock_mutex;
2481 /* memory stats */
2482 atomic64_t vram_usage;
2483 atomic64_t gtt_usage;
2484 atomic64_t num_bytes_moved;
2485 atomic_t gpu_reset_counter;
2486 /* ACPI interface */
2487 struct radeon_atif atif;
2488 struct radeon_atcs atcs;
2489 /* srbm instance registers */
2490 struct rwlock srbm_mutex;
2491 /* clock, powergating flags */
2492 u32 cg_flags;
2493 u32 pg_flags;
2494
2495 struct dev_pm_domain vga_pm_domain;
2496 bool have_disp_power_ref;
2497 u32 px_quirk_flags;
2498
2499 /* tracking pinned memory */
2500 u64 vram_pin_size;
2501 u64 gart_pin_size;
2502};
2503
2504 bool radeon_is_px(struct drm_device *dev);
2505int radeon_device_init(struct radeon_device *rdev,
2506 struct drm_device *ddev,
2507 struct pci_dev *pdev,
2508 uint32_t flags);
2509void radeon_device_fini(struct radeon_device *rdev);
2510int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
2511
2512 #define RADEON_MIN_MMIO_SIZE 0x10000
2513
2514 uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg);
2515 void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v);
2516 static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
2517     bool always_indirect)
2518 {
2519 /* The mmio size is 64kb at minimum. Allows the if to be optimized out. */
2520 if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
    7: Assuming 'reg' is >= field 'rmmio_size'
    7.1: 'reg' is < RADEON_MIN_MMIO_SIZE
    7.2: 'always_indirect' is false
    8: Taking true branch
2521 return readl(((void __iomem *)rdev->rmmio) + reg);
    9: Returning without writing to 'rdev->smc_fw'
2522 else
2523 return r100_mm_rreg_slow(rdev, reg);
2524 }
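Two things are worth spelling out about the fast path above. First, analyzer step 9 is what lets the reported bug survive this call: the readl() return path never touches rdev->smc_fw, so a NULL stored into that field earlier on the path is still there when si.c later dereferences it (the access to 'size' flagged at line 1925). Second, the comment about the 64 KB minimum is a code-size argument: when the register offset is a compile-time constant below RADEON_MIN_MMIO_SIZE, the range check folds to true and the slow-path branch disappears. A minimal sketch of that second point, with 0x8010 used purely as an illustrative constant offset (it is not taken from si.c):

/*
 * Sketch only: a constant offset below RADEON_MIN_MMIO_SIZE (0x10000)
 * makes "reg < RADEON_MIN_MMIO_SIZE" trivially true, so the compiler
 * can drop the r100_mm_rreg_slow() branch and emit just the readl().
 */
static inline uint32_t example_fast_path_read(struct radeon_device *rdev)
{
	return r100_mm_rreg(rdev, 0x8010, false);
}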
2525 static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
2526     bool always_indirect)
2527 {
2528 if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
2529 writel(v, ((void __iomem *)rdev->rmmio) + reg);
2530 else
2531 r100_mm_wreg_slow(rdev, reg, v);
2532 }
2533
2534u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
2535void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2536
2537u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
2538void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
2539
2540/*
2541 * Cast helper
2542 */
2543extern const struct dma_fence_ops radeon_fence_ops;
2544
2545static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
2546{
2547 struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
2548
2549 if (__f->base.ops == &radeon_fence_ops)
2550 return __f;
2551
2552 return NULL;
2553}
2554
2555/*
2556 * Registers read & write functions.
2557 */
2558 #define RREG8(reg) readb((rdev->rmmio) + (reg))
2559 #define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
2560 #define RREG16(reg) readw((rdev->rmmio) + (reg))
2561 #define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
2562 #define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
2563 #define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
2564 #define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
2565 	r100_mm_rreg(rdev, (reg), false))
2566 #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
2567 #define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
2568 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
2569 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
2570 #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
2571 #define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
2572 #define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
2573 #define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
2574 #define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
2575 #define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
2576 #define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
2577 #define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
2578 #define RREG32_SMC(reg) tn_smc_rreg(rdev, (reg))
2579 #define WREG32_SMC(reg, v) tn_smc_wreg(rdev, (reg), (v))
2580 #define RREG32_RCU(reg) r600_rcu_rreg(rdev, (reg))
2581 #define WREG32_RCU(reg, v) r600_rcu_wreg(rdev, (reg), (v))
2582 #define RREG32_CG(reg) eg_cg_rreg(rdev, (reg))
2583 #define WREG32_CG(reg, v) eg_cg_wreg(rdev, (reg), (v))
2584 #define RREG32_PIF_PHY0(reg) eg_pif_phy0_rreg(rdev, (reg))
2585 #define WREG32_PIF_PHY0(reg, v) eg_pif_phy0_wreg(rdev, (reg), (v))
2586 #define RREG32_PIF_PHY1(reg) eg_pif_phy1_rreg(rdev, (reg))
2587 #define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
2588 #define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
2589 #define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
2590 #define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
2591 #define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
2592 #define WREG32_P(reg, val, mask) \
2593 	do { \
2594 		uint32_t tmp_ = RREG32(reg); \
2595 		tmp_ &= (mask); \
2596 		tmp_ |= ((val) & ~(mask)); \
2597 		WREG32(reg, tmp_); \
2598 	} while (0)
2599 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
2600 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
2601 #define WREG32_PLL_P(reg, val, mask) \
2602 	do { \
2603 		uint32_t tmp_ = RREG32_PLL(reg); \
2604 		tmp_ &= (mask); \
2605 		tmp_ |= ((val) & ~(mask)); \
2606 		WREG32_PLL(reg, tmp_); \
2607 	} while (0)
2608 #define WREG32_SMC_P(reg, val, mask) \
2609 	do { \
2610 		uint32_t tmp_ = RREG32_SMC(reg); \
2611 		tmp_ &= (mask); \
2612 		tmp_ |= ((val) & ~(mask)); \
2613 		WREG32_SMC(reg, tmp_); \
2614 	} while (0)
2615 #define DREG32_SYS(sqf, rdev, reg) \
 	seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
2616 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
2617 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
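WREG32_P and its PLL/SMC variants implement a read-modify-write: the mask selects the bits to keep from the current register contents, and the new value supplies the bits outside that mask. A short usage sketch, where the 0x1234 offset and the field layout are made up for illustration:

/* Sketch, assuming "rdev" is in scope as it is throughout this header. */
static void example_update_low_byte(struct radeon_device *rdev)
{
	/* Keep bits 8..31 of the current value, force bits 0..7 to 0x5a. */
	WREG32_P(0x1234, 0x5a, ~0xffU);
}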
2618
2619 #define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
2620 #define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
2621
2622/*
2623 * Indirect registers accessors.
2624 * They used to be inlined, but this increases code size by ~65 kbytes.
2625 * Since each performs a pair of MMIO ops
2626 * within a spin_lock_irqsave/spin_unlock_irqrestore region,
2627 * the cost of call+ret is almost negligible. MMIO and locking
2628 * costs several dozens of cycles each at best, call+ret is ~5 cycles.
2629 */
2630uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
2631void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
2632u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg);
2633void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2634u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg);
2635void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2636u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg);
2637void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2638u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg);
2639void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2640u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg);
2641void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2642u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg);
2643void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2644u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg);
2645void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v);
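The comment above describes the shape all of these helpers share: a per-block spinlock held around an index-register write followed by a data-register access. A hedged sketch of that pattern is below; the EXAMPLE_SMC_IND_* offsets are placeholders, not the actual registers used by tn_smc_rreg() or the other accessors.

#define EXAMPLE_SMC_IND_INDEX 0x200	/* hypothetical index register offset */
#define EXAMPLE_SMC_IND_DATA  0x204	/* hypothetical data register offset */

/*
 * Illustrative indirect read: take the lock, select the target offset
 * through the index register, read it back through the data register.
 */
static u32 example_smc_indirect_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(EXAMPLE_SMC_IND_INDEX, reg);
	v = RREG32(EXAMPLE_SMC_IND_DATA);
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
	return v;
}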
2646
2647void r100_pll_errata_after_index(struct radeon_device *rdev);
2648
2649
2650/*
2651 * ASICs helpers.
2652 */
2653 #define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
2654 (rdev->pdev->device == 0x5969))
2655 #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
2656 (rdev->family == CHIP_RV200) || \
2657 (rdev->family == CHIP_RS100) || \
2658 (rdev->family == CHIP_RS200) || \
2659 (rdev->family == CHIP_RV250) || \
2660 (rdev->family == CHIP_RV280) || \
2661 (rdev->family == CHIP_RS300))
2662 #define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
2663 (rdev->family == CHIP_RV350) || \
2664 (rdev->family == CHIP_R350) || \
2665 (rdev->family == CHIP_RV380) || \
2666 (rdev->family == CHIP_R420) || \
2667 (rdev->family == CHIP_R423) || \
2668 (rdev->family == CHIP_RV410) || \
2669 (rdev->family == CHIP_RS400) || \
2670 (rdev->family == CHIP_RS480))
2671 #define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
2672 (rdev->ddev->pdev->device == 0x9443) || \
2673 (rdev->ddev->pdev->device == 0x944B) || \
2674 (rdev->ddev->pdev->device == 0x9506) || \
2675 (rdev->ddev->pdev->device == 0x9509) || \
2676 (rdev->ddev->pdev->device == 0x950F) || \
2677 (rdev->ddev->pdev->device == 0x689C) || \
2678 (rdev->ddev->pdev->device == 0x689D))
2679 #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
2680 #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
2681 (rdev->family == CHIP_RS690) || \
2682 (rdev->family == CHIP_RS740) || \
2683 (rdev->family >= CHIP_R600))
2684 #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
2685 #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
2686 #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
2687 #define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
2688 (rdev->flags & RADEON_IS_IGP))
2689 #define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
2690 #define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
2691 #define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
2692 (rdev->flags & RADEON_IS_IGP))
2693 #define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
2694 #define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
2695 #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
2696 #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
2697 #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
2698 #define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
2699 (rdev->family == CHIP_MULLINS))
2700
2701 #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
2702 (rdev->ddev->pdev->device == 0x6850) || \
2703 (rdev->ddev->pdev->device == 0x6858) || \
2704 (rdev->ddev->pdev->device == 0x6859) || \
2705 (rdev->ddev->pdev->device == 0x6840) || \
2706 (rdev->ddev->pdev->device == 0x6841) || \
2707 (rdev->ddev->pdev->device == 0x6842) || \
2708 (rdev->ddev->pdev->device == 0x6843))
2709
2710/*
2711 * BIOS helpers.
2712 */
2713 #define RBIOS8(i) (rdev->bios[i])
2714 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
2715 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
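RBIOS16 and RBIOS32 assemble little-endian 16- and 32-bit values from the byte-wise BIOS image in rdev->bios. For instance, the bios_header_start field in struct radeon_device is typically filled from a 16-bit pointer read this way; the 0x48 offset below is shown only as an illustration of the idiom, not as a value taken from this report:

/* Sketch, assuming rdev->bios has already been mapped and validated. */
uint16_t hdr_ptr = RBIOS16(0x48);	/* == bios[0x48] | (bios[0x49] << 8) */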
2716
2717int radeon_combios_init(struct radeon_device *rdev);
2718void radeon_combios_fini(struct radeon_device *rdev);
2719int radeon_atombios_init(struct radeon_device *rdev);
2720void radeon_atombios_fini(struct radeon_device *rdev);
2721
2722
2723/*
2724 * RING helpers.
2725 */
2726
2727/**
2728 * radeon_ring_write - write a value to the ring
2729 *
2730 * @ring: radeon_ring structure holding ring information
2731 * @v: dword (dw) value to write
2732 *
2733 * Write a value to the requested ring buffer (all asics).
2734 */
2735static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
2736{
2737 if (ring->count_dw <= 0)
2738 DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
2739
2740 ring->ring[ring->wptr++] = v;
2741 ring->wptr &= ring->ptr_mask;
2742 ring->count_dw--;
2743 ring->ring_free_dw--;
2744}
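radeon_ring_write() only copies one dword into the ring buffer and advances the write pointer; callers are expected to reserve space beforehand and commit afterwards. The sketch below shows that usual flow; radeon_ring_lock() and radeon_ring_unlock_commit() and their exact signatures are assumptions about the surrounding driver API, and the payload dwords are placeholders:

/* Hedged sketch of the reserve / write / commit pattern. */
static int example_emit_two_dwords(struct radeon_device *rdev,
				   struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);	/* reserve 2 dwords (assumed API) */
	if (r)
		return r;
	radeon_ring_write(ring, 0xdeadbeef);	/* placeholder payload */
	radeon_ring_write(ring, 0xcafef00d);
	radeon_ring_unlock_commit(rdev, ring, false);	/* assumed API */
	return 0;
}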
2745
2746/*
2747 * ASICs macro.
2748 */
2749 #define radeon_init(rdev) (rdev)->asic->init((rdev))
2750 #define radeon_fini(rdev) (rdev)->asic->fini((rdev))
2751 #define radeon_resume(rdev) (rdev)->asic->resume((rdev))
2752 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
2753 #define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
2754 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
2755 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev), false)
2756 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
2757 #define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
2758 #define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
2759 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
2760 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
2761 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
2762 #define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2763 #define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2764 #define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
2765 #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
2766 #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
2767 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
2768 #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
2769 #define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
2770 #define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
2771 #define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
2772 #define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
2773 #define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
2774 #define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
2775 #define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
2776 #define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
2777 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
2778 #define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
2779 #define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
2780 #define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
2781 #define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
2782 #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
2783 #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
2784 #define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
2785 #define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
2786 #define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
2787 #define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
2788 #define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
2789 #define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
2790 #define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
2791 #define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
2792 #define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
2793 #define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
2794 #define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
2795 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
2796 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
2797 #define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
2798 #define radeon_set_vce_clocks(rdev, ev, ec) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec))
2799 #define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
2800 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
2801 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
2802 #define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
2803 #define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
2804 #define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
2805 #define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
2806 #define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
2807 #define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
2808 #define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
2809 #define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
2810 #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
2811 #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
2812 #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
2813 #define radeon_page_flip(rdev, crtc, base, async) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base), (async))
2814 #define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
2815 #define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
2816 #define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
2817 #define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
2818 #define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
2819 #define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v))
2820 #define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
2821 #define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
2822 #define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
2823 #define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev))
2824 #define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
2825 #define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
2826 #define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
2827 #define radeon_dpm_post_set_power_state(rdev) rdev->asic->dpm.post_set_power_state((rdev))
2828 #define radeon_dpm_display_configuration_changed(rdev) rdev->asic->dpm.display_configuration_changed((rdev))
2829 #define radeon_dpm_fini(rdev) rdev->asic->dpm.fini((rdev))
2830 #define radeon_dpm_get_sclk(rdev, l) rdev->asic->dpm.get_sclk((rdev), (l))
2831 #define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l))
2832 #define radeon_dpm_print_power_state(rdev, ps) rdev->asic->dpm.print_power_state((rdev), (ps))
2833 #define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
2834 #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
2835 #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
2836 #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
2837 #define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
2838 #define radeon_dpm_get_current_sclk(rdev) rdev->asic->dpm.get_current_sclk((rdev))
2839 #define radeon_dpm_get_current_mclk(rdev) rdev->asic->dpm.get_current_mclk((rdev))
2840
2841/* Common functions */
2842/* AGP */
2843extern int radeon_gpu_reset(struct radeon_device *rdev);
2844extern void radeon_pci_config_reset(struct radeon_device *rdev);
2845 extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
2846 extern void radeon_agp_disable(struct radeon_device *rdev);
2847 extern int radeon_modeset_init(struct radeon_device *rdev);
2848 extern void radeon_modeset_fini(struct radeon_device *rdev);
2849 extern bool radeon_card_posted(struct radeon_device *rdev);
2850 extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
2851 extern void radeon_update_display_priority(struct radeon_device *rdev);
2852 extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
2853extern void radeon_scratch_init(struct radeon_device *rdev);
2854extern void radeon_wb_fini(struct radeon_device *rdev);
2855extern int radeon_wb_init(struct radeon_device *rdev);
2856extern void radeon_wb_disable(struct radeon_device *rdev);
2857extern void radeon_surface_init(struct radeon_device *rdev);
2858extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
2859extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
2860extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
2861extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
2862 extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
2863 extern int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
2864 struct ttm_tt *ttm, uint64_t addr,
2865 uint32_t flags);
2866 extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm);
2867 extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm);
2868 bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
2869 extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
2870 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
2871 extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
2872 extern int radeon_suspend_kms(struct drm_device *dev, bool suspend,
2873 bool fbcon, bool freeze);
2874extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
2875extern void radeon_program_register_sequence(struct radeon_device *rdev,
2876 const u32 *registers,
2877 const u32 array_size);
2878struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev);
2879
2880/*
2881 * vm
2882 */
2883int radeon_vm_manager_init(struct radeon_device *rdev);
2884void radeon_vm_manager_fini(struct radeon_device *rdev);
2885int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
2886void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
2887struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
2888 struct radeon_vm *vm,
2889 struct list_head *head);
2890struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
2891 struct radeon_vm *vm, int ring);
2892void radeon_vm_flush(struct radeon_device *rdev,
2893 struct radeon_vm *vm,
2894 int ring, struct radeon_fence *fence);
2895void radeon_vm_fence(struct radeon_device *rdev,
2896 struct radeon_vm *vm,
2897 struct radeon_fence *fence);
2898uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
2899int radeon_vm_update_page_directory(struct radeon_device *rdev,
2900 struct radeon_vm *vm);
2901int radeon_vm_clear_freed(struct radeon_device *rdev,
2902 struct radeon_vm *vm);
2903int radeon_vm_clear_invalids(struct radeon_device *rdev,
2904 struct radeon_vm *vm);
2905int radeon_vm_bo_update(struct radeon_device *rdev,
2906 struct radeon_bo_va *bo_va,
2907 struct ttm_resource *mem);
2908void radeon_vm_bo_invalidate(struct radeon_device *rdev,
2909 struct radeon_bo *bo);
2910struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
2911 struct radeon_bo *bo);
2912struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
2913 struct radeon_vm *vm,
2914 struct radeon_bo *bo);
2915int radeon_vm_bo_set_addr(struct radeon_device *rdev,
2916 struct radeon_bo_va *bo_va,
2917 uint64_t offset,
2918 uint32_t flags);
2919void radeon_vm_bo_rmv(struct radeon_device *rdev,
2920 struct radeon_bo_va *bo_va);
2921
2922/* audio */
2923void r600_audio_update_hdmi(struct work_struct *work);
2924struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
2925struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
2926void r600_audio_enable(struct radeon_device *rdev,
2927 struct r600_audio_pin *pin,
2928 u8 enable_mask);
2929void dce6_audio_enable(struct radeon_device *rdev,
2930 struct r600_audio_pin *pin,
2931 u8 enable_mask);
2932
2933/*
2934 * R600 vram scratch functions
2935 */
2936int r600_vram_scratch_init(struct radeon_device *rdev);
2937void r600_vram_scratch_fini(struct radeon_device *rdev);
2938
2939/*
2940 * r600 cs checking helper
2941 */
2942unsigned r600_mip_minify(unsigned size, unsigned level);
2943 bool r600_fmt_is_valid_color(u32 format);
2944 bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
2945int r600_fmt_get_blocksize(u32 format);
2946int r600_fmt_get_nblocksx(u32 format, u32 w);
2947int r600_fmt_get_nblocksy(u32 format, u32 h);
2948
2949/*
2950 * r600 functions used by radeon_encoder.c
2951 */
2952struct radeon_hdmi_acr {
2953 u32 clock;
2954
2955 int n_32khz;
2956 int cts_32khz;
2957
2958 int n_44_1khz;
2959 int cts_44_1khz;
2960
2961 int n_48khz;
2962 int cts_48khz;
2963
2964};
2965
2966extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
2967
2968extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
2969 u32 tiling_pipe_num,
2970 u32 max_rb_num,
2971 u32 total_max_rb_num,
2972 u32 enabled_rb_mask);
2973
2974/*
2975 * evergreen functions used by radeon_encoder.c
2976 */
2977
2978extern int ni_init_microcode(struct radeon_device *rdev);
2979extern int ni_mc_load_microcode(struct radeon_device *rdev);
2980
2981/* radeon_acpi.c */
2982 #if defined(CONFIG_ACPI)
2983 extern int radeon_acpi_init(struct radeon_device *rdev);
2984 extern void radeon_acpi_fini(struct radeon_device *rdev);
2985 extern bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev);
2986 extern int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
2987 u8 perf_req, bool advertise);
2988extern int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev);
2989#else
2990static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
2991static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
2992#endif
2993
2994int radeon_cs_packet_parse(struct radeon_cs_parser *p,
2995 struct radeon_cs_packet *pkt,
2996 unsigned idx);
2997 bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
2998void radeon_cs_dump_packet(struct radeon_cs_parser *p,
2999 struct radeon_cs_packet *pkt);
3000int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
3001 struct radeon_bo_list **cs_reloc,
3002 int nomm);
3003int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
3004 uint32_t *vline_start_end,
3005 uint32_t *vline_status);
3006
3007/* interrupt control register helpers */
3008void radeon_irq_kms_set_irq_n_enabled(struct radeon_device *rdev,
3009 u32 reg, u32 mask,
3010 bool enable, const char *name,
3011 unsigned n);
3012
3013#include "radeon_object.h"
3014
3015#endif

/usr/src/sys/dev/pci/drm/include/linux/firmware.h

1/* Public domain. */
2
3#ifndef _LINUX_FIRMWARE_H
4#define _LINUX_FIRMWARE_H
5
6#include <sys/types.h>
7#include <sys/malloc.h>
8#include <sys/device.h>
9#include <linux/types.h>
10#include <linux/gfp.h>
11
12#ifndef __DECONST
13 #define __DECONST(type, var) ((type)(__uintptr_t)(const void *)(var))
14#endif
15
16struct firmware {
17 size_t size;
18 const u8 *data;
19};
20
21static inline int
22request_firmware(const struct firmware **fw, const char *name,
23 struct device *device)
24{
25 int r;
26 struct firmware *f = malloc(sizeof(struct firmware), M_DRM,
27     M_WAITOK | M_ZERO);
28 r = loadfirmware(name, __DECONST(u_char **, &f->data), &f->size);
29 if (r != 0) {
    14: Assuming 'r' is equal to 0
    15: Taking false branch
    22: Assuming 'r' is equal to 0
    23: Taking false branch
    30: Assuming 'r' is equal to 0
    31: Taking false branch
    38: Assuming 'r' is equal to 0
    39: Taking false branch
    47: Assuming 'r' is equal to 0
    48: Taking false branch
    59: Assuming 'r' is not equal to 0
    60: Taking true branch
30 free(f, M_DRM, sizeof(struct firmware));
31 *fw = NULL;
    61: Null pointer value stored to field 'smc_fw'
32 return -r;
33 } else {
34 *fw = f;
35 return 0;
    16: Returning zero, which participates in a condition later
    24: Returning zero, which participates in a condition later
    32: Returning zero, which participates in a condition later
    40: Returning zero, which participates in a condition later
    49: Returning zero, which participates in a condition later
36 }
37}
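Steps 59 to 61 above are the origin of the reported defect: when loadfirmware() fails, *fw (here &rdev->smc_fw) is set to NULL before the error is returned, so any later unchecked use of rdev->smc_fw->size is exactly the null dereference flagged at si.c line 1925. A hedged sketch of the guarded calling pattern (the function, firmware name, and device argument below are illustrative, not quoted from si.c):

/*
 * Sketch only: check the return value and the pointer before touching
 * smc_fw->size, so a failed load can never be dereferenced.
 */
static int example_load_smc(struct radeon_device *rdev, struct device *dev)
{
	int err = request_firmware(&rdev->smc_fw, "radeon/example_smc.bin", dev);

	if (err || rdev->smc_fw == NULL)
		return err ? err : -EINVAL;
	/* rdev->smc_fw->size is now safe to use. */
	return 0;
}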
38
39static inline int
40request_firmware_direct(const struct firmware **fw, const char *name,
41 struct device *device)
42{
43 return request_firmware(fw, name, device);
44}
45
46 #define request_firmware_nowait(a, b, c, d, e, f, g) -EINVAL
47
48static inline void
49release_firmware(const struct firmware *fw)
50{
51 if (fw)
52 free(__DECONST(u_char *, fw->data), M_DEVBUF, fw->size);
53 free(__DECONST(struct firmware *, fw), M_DRM, sizeof(*fw));
54}
55
56#endif