Bug Summary

File: dev/pci/drm/radeon/r100.c
Warning: line 1052, column 13
Access to field 'size' results in a dereference of a null pointer (loaded from field 'me_fw')
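The warning fires on the error-handling tail of r100_cp_init_microcode() (line 1052 in the listing below): the analyzer assumes request_firmware() can return 0 while leaving rdev->me_fw NULL, in which case the size % 8 check dereferences a null pointer. The defect class is easiest to see outside the kernel: the toy user-space model below (load_fw() and its struct are invented stand-ins, not the kernel's request_firmware()) reproduces the flagged pattern and crashes on the same dereference.

    #include <stdio.h>
    #include <stdlib.h>

    struct firmware { size_t size; };

    /* Toy stand-in for request_firmware(): on the path the analyzer
     * assumes, it reports success (0) yet leaves *fw NULL. */
    static int load_fw(const struct firmware **fw, const char *name)
    {
        if (name == NULL) {
            *fw = NULL;
            return 0;
        }
        *fw = calloc(1, sizeof(**fw));
        return *fw ? 0 : -1;
    }

    int main(void)
    {
        const struct firmware *me_fw;
        int err = load_fw(&me_fw, NULL);   /* models fw_name staying NULL */

        if (err)
            printf("load failed\n");
        else if (me_fw->size % 8)          /* NULL deref; mirrors r100.c:1052 */
            printf("bogus length\n");
        return 0;
    }

Whether the real request_firmware() can actually behave this way on OpenBSD is exactly what the path below (steps 305-312) hinges on.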

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name r100.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/radeon/r100.c

/usr/src/sys/dev/pci/drm/radeon/r100.c

1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29#include <linux/firmware.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/seq_file.h>
33#include <linux/slab.h>
34
35#include <drm/drm_debugfs.h>
36#include <drm/drm_device.h>
37#include <drm/drm_file.h>
38#include <drm/drm_fourcc.h>
39#include <drm/drm_vblank.h>
40#include <drm/radeon_drm.h>
41
42#include "atom.h"
43#include "r100_reg_safe.h"
44#include "r100d.h"
45#include "radeon.h"
46#include "radeon_asic.h"
47#include "radeon_reg.h"
48#include "rn50_reg_safe.h"
49#include "rs100d.h"
50#include "rv200d.h"
51#include "rv250d.h"
52
53/* Firmware Names */
54#define FIRMWARE_R100 "radeon/R100_cp.bin"
55#define FIRMWARE_R200 "radeon/R200_cp.bin"
56#define FIRMWARE_R300 "radeon/R300_cp.bin"
57#define FIRMWARE_R420 "radeon/R420_cp.bin"
58#define FIRMWARE_RS690 "radeon/RS690_cp.bin"
59#define FIRMWARE_RS600 "radeon/RS600_cp.bin"
60#define FIRMWARE_R520 "radeon/R520_cp.bin"
61
62MODULE_FIRMWARE(FIRMWARE_R100);
63MODULE_FIRMWARE(FIRMWARE_R200);
64MODULE_FIRMWARE(FIRMWARE_R300);
65MODULE_FIRMWARE(FIRMWARE_R420);
66MODULE_FIRMWARE(FIRMWARE_RS690);
67MODULE_FIRMWARE(FIRMWARE_RS600);
68MODULE_FIRMWARE(FIRMWARE_R520);
69
70#include "r100_track.h"
71
72/* This file gathers functions specific to:
73 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
74 * and others in some cases.
75 */
76
77static bool_Bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
78{
79 if (crtc == 0) {
80 if (RREG32(RADEON_CRTC_STATUS)r100_mm_rreg(rdev, (0x005c), 0) & RADEON_CRTC_VBLANK_CUR(1 << 0))
81 return true1;
82 else
83 return false0;
84 } else {
85 if (RREG32(RADEON_CRTC2_STATUS)r100_mm_rreg(rdev, (0x03fc), 0) & RADEON_CRTC2_VBLANK_CUR(1 << 0))
86 return true1;
87 else
88 return false0;
89 }
90}
91
92static bool_Bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
93{
94 u32 vline1, vline2;
95
96 if (crtc == 0) {
97 vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE)r100_mm_rreg(rdev, (0x0210), 0) >> 16) & RADEON_CRTC_V_TOTAL(0x07ff << 0);
98 vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE)r100_mm_rreg(rdev, (0x0210), 0) >> 16) & RADEON_CRTC_V_TOTAL(0x07ff << 0);
99 } else {
100 vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE)r100_mm_rreg(rdev, (0x0310), 0) >> 16) & RADEON_CRTC_V_TOTAL(0x07ff << 0);
101 vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE)r100_mm_rreg(rdev, (0x0310), 0) >> 16) & RADEON_CRTC_V_TOTAL(0x07ff << 0);
102 }
103 if (vline1 != vline2)
104 return true1;
105 else
106 return false0;
107}
108
109/**
110 * r100_wait_for_vblank - vblank wait asic callback.
111 *
112 * @rdev: radeon_device pointer
113 * @crtc: crtc to wait for vblank on
114 *
115 * Wait for vblank on the requested crtc (r1xx-r4xx).
116 */
117void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
118{
119 unsigned i = 0;
120
121 if (crtc >= rdev->num_crtc)
122 return;
123
124 if (crtc == 0) {
125 if (!(RREG32(RADEON_CRTC_GEN_CNTL)r100_mm_rreg(rdev, (0x0050), 0) & RADEON_CRTC_EN(1 << 25)))
126 return;
127 } else {
128 if (!(RREG32(RADEON_CRTC2_GEN_CNTL)r100_mm_rreg(rdev, (0x03f8), 0) & RADEON_CRTC2_EN(1 << 25)))
129 return;
130 }
131
132 /* depending on when we hit vblank, we may be close to active; if so,
133 * wait for another frame.
134 */
135 while (r100_is_in_vblank(rdev, crtc)) {
136 if (i++ % 100 == 0) {
137 if (!r100_is_counter_moving(rdev, crtc))
138 break;
139 }
140 }
141
142 while (!r100_is_in_vblank(rdev, crtc)) {
143 if (i++ % 100 == 0) {
144 if (!r100_is_counter_moving(rdev, crtc))
145 break;
146 }
147 }
148}
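The two loops above are one idiom instantiated twice: poll for a vblank edge, but bail out if the scanline counter has frozen (the CRTC is effectively inactive). A condensed sketch of that idiom, using only the two helpers defined earlier in this file (the helper name is ours):

    static void r100_wait_vblank_edge(struct radeon_device *rdev, int crtc,
                                      bool want_in_vblank)
    {
        unsigned i = 0;

        /* every 100 iterations, give up if the vline counter is stuck */
        while (r100_is_in_vblank(rdev, crtc) != want_in_vblank) {
            if (i++ % 100 == 0 &&
                !r100_is_counter_moving(rdev, crtc))
                break;
        }
    }

r100_wait_for_vblank() is then this helper called first with want_in_vblank = false, to let any in-progress vblank finish, then with true, to catch the start of the next one.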
149
150/**
151 * r100_page_flip - pageflip callback.
152 *
153 * @rdev: radeon_device pointer
154 * @crtc_id: crtc to cleanup pageflip on
155 * @crtc_base: new address of the crtc (GPU MC address)
156 *
157 * Does the actual pageflip (r1xx-r4xx).
158 * During vblank we take the crtc lock and wait for the update_pending
159 * bit to go high, when it does, we release the lock, and allow the
160 * double buffered update to take place.
161 */
162void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool_Bool async)
163{
164 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
165 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK(1<<31);
166 int i;
167
168 /* Lock the graphics update lock */
169 /* update the scanout addresses */
170 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp)r100_mm_wreg(rdev, (0x0224 + radeon_crtc->crtc_offset), (tmp
), 0)
;
171
172 /* Wait for update_pending to go high. */
173 for (i = 0; i < rdev->usec_timeout; i++) {
174 if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset)r100_mm_rreg(rdev, (0x0224 + radeon_crtc->crtc_offset), 0) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET(1<<30))
175 break;
176 udelay(1);
177 }
178 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n")__drm_dbg(DRM_UT_CORE, "Update pending now high. Unlocking vupdate_lock.\n"
)
;
179
180 /* Unlock the lock, so double-buffering can take place inside vblank */
181 tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK(1<<31);
182 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp)r100_mm_wreg(rdev, (0x0224 + radeon_crtc->crtc_offset), (tmp
), 0)
;
183
184}
185
186/**
187 * r100_page_flip_pending - check if page flip is still pending
188 *
189 * @rdev: radeon_device pointer
190 * @crtc_id: crtc to check
191 *
192 * Check if the last pageflip is still pending (r1xx-r4xx).
193 * Returns the current update pending status.
194 */
195bool_Bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
196{
197 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
198
199 /* Return current update_pending status: */
200 return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset)r100_mm_rreg(rdev, (0x0224 + radeon_crtc->crtc_offset), 0) &
201 RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET(1<<30));
202}
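A sketch of how these two callbacks pair up from the caller's side; the real caller lives in the shared radeon pageflip code, not in this file, so the surrounding declarations here are illustrative only:

    int crtc_id = 0, i;
    u64 new_crtc_base = 0;   /* GPU MC address of the new framebuffer */

    /* program the new scanout base; the OFFSET_LOCK handshake inside
     * r100_page_flip() keeps the update atomic w.r.t. scanout */
    r100_page_flip(rdev, crtc_id, new_crtc_base, false);

    /* then poll until the double-buffered update has latched */
    for (i = 0; i < rdev->usec_timeout; i++) {
        if (!r100_page_flip_pending(rdev, crtc_id))
            break;
        udelay(1);
    }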
203
204/**
205 * r100_pm_get_dynpm_state - look up dynpm power state callback.
206 *
207 * @rdev: radeon_device pointer
208 *
209 * Look up the optimal power state based on the
210 * current state of the GPU (r1xx-r5xx).
211 * Used for dynpm only.
212 */
213void r100_pm_get_dynpm_state(struct radeon_device *rdev)
214{
215 int i;
216 rdev->pm.dynpm_can_upclock = true1;
217 rdev->pm.dynpm_can_downclock = true1;
218
219 switch (rdev->pm.dynpm_planned_action) {
220 case DYNPM_ACTION_MINIMUM:
221 rdev->pm.requested_power_state_index = 0;
222 rdev->pm.dynpm_can_downclock = false0;
223 break;
224 case DYNPM_ACTION_DOWNCLOCK:
225 if (rdev->pm.current_power_state_index == 0) {
226 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
227 rdev->pm.dynpm_can_downclock = false0;
228 } else {
229 if (rdev->pm.active_crtc_count > 1) {
230 for (i = 0; i < rdev->pm.num_power_states; i++) {
231 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY(1 << 0))
232 continue;
233 else if (i >= rdev->pm.current_power_state_index) {
234 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
235 break;
236 } else {
237 rdev->pm.requested_power_state_index = i;
238 break;
239 }
240 }
241 } else
242 rdev->pm.requested_power_state_index =
243 rdev->pm.current_power_state_index - 1;
244 }
245 /* don't use the power state if crtcs are active and no display flag is set */
246 if ((rdev->pm.active_crtc_count > 0) &&
247 (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
248 RADEON_PM_MODE_NO_DISPLAY(1 << 0))) {
249 rdev->pm.requested_power_state_index++;
250 }
251 break;
252 case DYNPM_ACTION_UPCLOCK:
253 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
254 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
255 rdev->pm.dynpm_can_upclock = false0;
256 } else {
257 if (rdev->pm.active_crtc_count > 1) {
258 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
259 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY(1 << 0))
260 continue;
261 else if (i <= rdev->pm.current_power_state_index) {
262 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
263 break;
264 } else {
265 rdev->pm.requested_power_state_index = i;
266 break;
267 }
268 }
269 } else
270 rdev->pm.requested_power_state_index =
271 rdev->pm.current_power_state_index + 1;
272 }
273 break;
274 case DYNPM_ACTION_DEFAULT:
275 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
276 rdev->pm.dynpm_can_upclock = false0;
277 break;
278 case DYNPM_ACTION_NONE:
279 default:
280 DRM_ERROR("Requested mode for not defined action\n")__drm_err("Requested mode for not defined action\n");
281 return;
282 }
283 /* only one clock mode per power state */
284 rdev->pm.requested_clock_mode_index = 0;
285
286 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
287 rdev->pm.power_state[rdev->pm.requested_power_state_index].
288 clock_info[rdev->pm.requested_clock_mode_index].sclk,
289 rdev->pm.power_state[rdev->pm.requested_power_state_index].
290 clock_info[rdev->pm.requested_clock_mode_index].mclk,
291 rdev->pm.power_state[rdev->pm.requested_power_state_index].
292 pcie_lanes);
293}
294
295/**
296 * r100_pm_init_profile - Initialize power profiles callback.
297 *
298 * @rdev: radeon_device pointer
299 *
300 * Initialize the power states used in profile mode
301 * (r1xx-r3xx).
302 * Used for profile mode only.
303 */
304void r100_pm_init_profile(struct radeon_device *rdev)
305{
306 /* default */
307 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX0].dpms_off_ps_idx = rdev->pm.default_power_state_index;
308 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX0].dpms_on_ps_idx = rdev->pm.default_power_state_index;
309 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX0].dpms_off_cm_idx = 0;
310 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX0].dpms_on_cm_idx = 0;
311 /* low sh */
312 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX1].dpms_off_ps_idx = 0;
313 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX1].dpms_on_ps_idx = 0;
314 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX1].dpms_off_cm_idx = 0;
315 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX1].dpms_on_cm_idx = 0;
316 /* mid sh */
317 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX2].dpms_off_ps_idx = 0;
318 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX2].dpms_on_ps_idx = 0;
319 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX2].dpms_off_cm_idx = 0;
320 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX2].dpms_on_cm_idx = 0;
321 /* high sh */
322 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX3].dpms_off_ps_idx = 0;
323 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX3].dpms_on_ps_idx = rdev->pm.default_power_state_index;
324 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX3].dpms_off_cm_idx = 0;
325 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX3].dpms_on_cm_idx = 0;
326 /* low mh */
327 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX4].dpms_off_ps_idx = 0;
328 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX4].dpms_on_ps_idx = rdev->pm.default_power_state_index;
329 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX4].dpms_off_cm_idx = 0;
330 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX4].dpms_on_cm_idx = 0;
331 /* mid mh */
332 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX5].dpms_off_ps_idx = 0;
333 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX5].dpms_on_ps_idx = rdev->pm.default_power_state_index;
334 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX5].dpms_off_cm_idx = 0;
335 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX5].dpms_on_cm_idx = 0;
336 /* high mh */
337 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX6].dpms_off_ps_idx = 0;
338 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX6].dpms_on_ps_idx = rdev->pm.default_power_state_index;
339 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX6].dpms_off_cm_idx = 0;
340 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX6].dpms_on_cm_idx = 0;
341}
342
343/**
344 * r100_pm_misc - set additional pm hw parameters callback.
345 *
346 * @rdev: radeon_device pointer
347 *
348 * Set non-clock parameters associated with a power state
349 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
350 */
351void r100_pm_misc(struct radeon_device *rdev)
352{
353 int requested_index = rdev->pm.requested_power_state_index;
354 struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
355 struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
356 u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
357
358 if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
359 if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT0x00000004L) {
360 tmp = RREG32(voltage->gpio.reg)r100_mm_rreg(rdev, (voltage->gpio.reg), 0);
361 if (voltage->active_high)
362 tmp |= voltage->gpio.mask;
363 else
364 tmp &= ~(voltage->gpio.mask);
365 WREG32(voltage->gpio.reg, tmp)r100_mm_wreg(rdev, (voltage->gpio.reg), (tmp), 0);
366 if (voltage->delay)
367 udelay(voltage->delay);
368 } else {
369 tmp = RREG32(voltage->gpio.reg)r100_mm_rreg(rdev, (voltage->gpio.reg), 0);
370 if (voltage->active_high)
371 tmp &= ~voltage->gpio.mask;
372 else
373 tmp |= voltage->gpio.mask;
374 WREG32(voltage->gpio.reg, tmp)r100_mm_wreg(rdev, (voltage->gpio.reg), (tmp), 0);
375 if (voltage->delay)
376 udelay(voltage->delay);
377 }
378 }
379
380 sclk_cntl = RREG32_PLL(SCLK_CNTL)rdev->pll_rreg(rdev, (0xd));
381 sclk_cntl2 = RREG32_PLL(SCLK_CNTL2)rdev->pll_rreg(rdev, (0x1e));
382 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3)((3) << 17);
383 sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL)rdev->pll_rreg(rdev, (0x35));
384 sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3)((3) << 20);
385 if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN0x00000100L) {
386 sclk_more_cntl |= REDUCED_SPEED_SCLK_EN(1 << 16);
387 if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE0x00400000L)
388 sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE(1 << 16);
389 else
390 sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE(1 << 16);
391 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_20x00800000L)
392 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0)((0) << 17);
393 else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_40x01000000L)
394 sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2)((2) << 17);
395 } else
396 sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN(1 << 16);
397
398 if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN0x00000200L) {
399 sclk_more_cntl |= IO_CG_VOLTAGE_DROP(1 << 17);
400 if (voltage->delay) {
401 sclk_more_cntl |= VOLTAGE_DROP_SYNC(1 << 19);
402 switch (voltage->delay) {
403 case 33:
404 sclk_more_cntl |= VOLTAGE_DELAY_SEL(0)((0) << 20);
405 break;
406 case 66:
407 sclk_more_cntl |= VOLTAGE_DELAY_SEL(1)((1) << 20);
408 break;
409 case 99:
410 sclk_more_cntl |= VOLTAGE_DELAY_SEL(2)((2) << 20);
411 break;
412 case 132:
413 sclk_more_cntl |= VOLTAGE_DELAY_SEL(3)((3) << 20);
414 break;
415 }
416 } else
417 sclk_more_cntl &= ~VOLTAGE_DROP_SYNC(1 << 19);
418 } else
419 sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP(1 << 17);
420
421 if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN0x02000000L)
422 sclk_cntl &= ~FORCE_HDP(1 << 17);
423 else
424 sclk_cntl |= FORCE_HDP(1 << 17);
425
426 WREG32_PLL(SCLK_CNTL, sclk_cntl)rdev->pll_wreg(rdev, (0xd), (sclk_cntl));
427 WREG32_PLL(SCLK_CNTL2, sclk_cntl2)rdev->pll_wreg(rdev, (0x1e), (sclk_cntl2));
428 WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl)rdev->pll_wreg(rdev, (0x35), (sclk_more_cntl));
429
430 /* set pcie lanes */
431 if ((rdev->flags & RADEON_IS_PCIE) &&
432 !(rdev->flags & RADEON_IS_IGP) &&
433 rdev->asic->pm.set_pcie_lanes &&
434 (ps->pcie_lanes !=
435 rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
436 radeon_set_pcie_lanes(rdev,
437 ps->pcie_lanes);
438 DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes)__drm_dbg(DRM_UT_DRIVER, "Setting: p: %d\n", ps->pcie_lanes
)
;
439 }
440}
441
442/**
443 * r100_pm_prepare - pre-power state change callback.
444 *
445 * @rdev: radeon_device pointer
446 *
447 * Prepare for a power state change (r1xx-r4xx).
448 */
449void r100_pm_prepare(struct radeon_device *rdev)
450{
451 struct drm_device *ddev = rdev->ddev;
452 struct drm_crtc *crtc;
453 struct radeon_crtc *radeon_crtc;
454 u32 tmp;
455
456 /* disable any active CRTCs */
457 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->head
) *__mptr = ((&ddev->mode_config.crtc_list)->next)
; (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof
(*crtc), head) );}); &crtc->head != (&ddev->mode_config
.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)
->head ) *__mptr = (crtc->head.next); (__typeof(*crtc) *
)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), head)
);}))
{
458 radeon_crtc = to_radeon_crtc(crtc)({ const __typeof( ((struct radeon_crtc *)0)->base ) *__mptr
= (crtc); (struct radeon_crtc *)( (char *)__mptr - __builtin_offsetof
(struct radeon_crtc, base) );})
;
459 if (radeon_crtc->enabled) {
460 if (radeon_crtc->crtc_id) {
461 tmp = RREG32(RADEON_CRTC2_GEN_CNTL)r100_mm_rreg(rdev, (0x03f8), 0);
462 tmp |= RADEON_CRTC2_DISP_REQ_EN_B(1 << 26);
463 WREG32(RADEON_CRTC2_GEN_CNTL, tmp)r100_mm_wreg(rdev, (0x03f8), (tmp), 0);
464 } else {
465 tmp = RREG32(RADEON_CRTC_GEN_CNTL)r100_mm_rreg(rdev, (0x0050), 0);
466 tmp |= RADEON_CRTC_DISP_REQ_EN_B(1 << 26);
467 WREG32(RADEON_CRTC_GEN_CNTL, tmp)r100_mm_wreg(rdev, (0x0050), (tmp), 0);
468 }
469 }
470 }
471}
472
473/**
474 * r100_pm_finish - post-power state change callback.
475 *
476 * @rdev: radeon_device pointer
477 *
478 * Clean up after a power state change (r1xx-r4xx).
479 */
480void r100_pm_finish(struct radeon_device *rdev)
481{
482 struct drm_device *ddev = rdev->ddev;
483 struct drm_crtc *crtc;
484 struct radeon_crtc *radeon_crtc;
485 u32 tmp;
486
487 /* enable any active CRTCs */
488 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head)for (crtc = ({ const __typeof( ((__typeof(*crtc) *)0)->head
) *__mptr = ((&ddev->mode_config.crtc_list)->next)
; (__typeof(*crtc) *)( (char *)__mptr - __builtin_offsetof(__typeof
(*crtc), head) );}); &crtc->head != (&ddev->mode_config
.crtc_list); crtc = ({ const __typeof( ((__typeof(*crtc) *)0)
->head ) *__mptr = (crtc->head.next); (__typeof(*crtc) *
)( (char *)__mptr - __builtin_offsetof(__typeof(*crtc), head)
);}))
{
489 radeon_crtc = to_radeon_crtc(crtc)({ const __typeof( ((struct radeon_crtc *)0)->base ) *__mptr
= (crtc); (struct radeon_crtc *)( (char *)__mptr - __builtin_offsetof
(struct radeon_crtc, base) );})
;
490 if (radeon_crtc->enabled) {
491 if (radeon_crtc->crtc_id) {
492 tmp = RREG32(RADEON_CRTC2_GEN_CNTL)r100_mm_rreg(rdev, (0x03f8), 0);
493 tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B(1 << 26);
494 WREG32(RADEON_CRTC2_GEN_CNTL, tmp)r100_mm_wreg(rdev, (0x03f8), (tmp), 0);
495 } else {
496 tmp = RREG32(RADEON_CRTC_GEN_CNTL)r100_mm_rreg(rdev, (0x0050), 0);
497 tmp &= ~RADEON_CRTC_DISP_REQ_EN_B(1 << 26);
498 WREG32(RADEON_CRTC_GEN_CNTL, tmp)r100_mm_wreg(rdev, (0x0050), (tmp), 0);
499 }
500 }
501 }
502}
503
504/**
505 * r100_gui_idle - gui idle callback.
506 *
507 * @rdev: radeon_device pointer
508 *
509 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
510 * Returns true if idle, false if not.
511 */
512bool_Bool r100_gui_idle(struct radeon_device *rdev)
513{
514 if (RREG32(RADEON_RBBM_STATUS)r100_mm_rreg(rdev, (0x0e40), 0) & RADEON_RBBM_ACTIVE(1 << 31))
515 return false0;
516 else
517 return true1;
518}
519
520/* hpd for digital panel detect/disconnect */
521/**
522 * r100_hpd_sense - hpd sense callback.
523 *
524 * @rdev: radeon_device pointer
525 * @hpd: hpd (hotplug detect) pin
526 *
527 * Checks if a digital monitor is connected (r1xx-r4xx).
528 * Returns true if connected, false if not connected.
529 */
530bool_Bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
531{
532 bool_Bool connected = false0;
533
534 switch (hpd) {
535 case RADEON_HPD_1:
536 if (RREG32(RADEON_FP_GEN_CNTL)r100_mm_rreg(rdev, (0x0284), 0) & RADEON_FP_DETECT_SENSE(1 << 8))
537 connected = true1;
538 break;
539 case RADEON_HPD_2:
540 if (RREG32(RADEON_FP2_GEN_CNTL)r100_mm_rreg(rdev, (0x0288), 0) & RADEON_FP2_DETECT_SENSE(1 << 8))
541 connected = true1;
542 break;
543 default:
544 break;
545 }
546 return connected;
547}
548
549/**
550 * r100_hpd_set_polarity - hpd set polarity callback.
551 *
552 * @rdev: radeon_device pointer
553 * @hpd: hpd (hotplug detect) pin
554 *
555 * Set the polarity of the hpd pin (r1xx-r4xx).
556 */
557void r100_hpd_set_polarity(struct radeon_device *rdev,
558 enum radeon_hpd_id hpd)
559{
560 u32 tmp;
561 bool_Bool connected = r100_hpd_sense(rdev, hpd);
562
563 switch (hpd) {
564 case RADEON_HPD_1:
565 tmp = RREG32(RADEON_FP_GEN_CNTL)r100_mm_rreg(rdev, (0x0284), 0);
566 if (connected)
567 tmp &= ~RADEON_FP_DETECT_INT_POL(1 << 9);
568 else
569 tmp |= RADEON_FP_DETECT_INT_POL(1 << 9);
570 WREG32(RADEON_FP_GEN_CNTL, tmp)r100_mm_wreg(rdev, (0x0284), (tmp), 0);
571 break;
572 case RADEON_HPD_2:
573 tmp = RREG32(RADEON_FP2_GEN_CNTL)r100_mm_rreg(rdev, (0x0288), 0);
574 if (connected)
575 tmp &= ~RADEON_FP2_DETECT_INT_POL(1 << 9);
576 else
577 tmp |= RADEON_FP2_DETECT_INT_POL(1 << 9);
578 WREG32(RADEON_FP2_GEN_CNTL, tmp)r100_mm_wreg(rdev, (0x0288), (tmp), 0);
579 break;
580 default:
581 break;
582 }
583}
584
585/**
586 * r100_hpd_init - hpd setup callback.
587 *
588 * @rdev: radeon_device pointer
589 *
590 * Setup the hpd pins used by the card (r1xx-r4xx).
591 * Set the polarity, and enable the hpd interrupts.
592 */
593void r100_hpd_init(struct radeon_device *rdev)
594{
595 struct drm_device *dev = rdev->ddev;
596 struct drm_connector *connector;
597 unsigned enable = 0;
598
599 list_for_each_entry(connector, &dev->mode_config.connector_list, head)for (connector = ({ const __typeof( ((__typeof(*connector) *)
0)->head ) *__mptr = ((&dev->mode_config.connector_list
)->next); (__typeof(*connector) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*connector), head) );}); &connector->head !=
(&dev->mode_config.connector_list); connector = ({ const
__typeof( ((__typeof(*connector) *)0)->head ) *__mptr = (
connector->head.next); (__typeof(*connector) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*connector), head) );}))
{
600 struct radeon_connector *radeon_connector = to_radeon_connector(connector)({ const __typeof( ((struct radeon_connector *)0)->base ) *
__mptr = (connector); (struct radeon_connector *)( (char *)__mptr
- __builtin_offsetof(struct radeon_connector, base) );})
;
601 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
602 enable |= 1 << radeon_connector->hpd.hpd;
603 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd)(rdev)->asic->hpd.set_polarity((rdev), (radeon_connector
->hpd.hpd))
;
604 }
605 radeon_irq_kms_enable_hpd(rdev, enable);
606}
607
608/**
609 * r100_hpd_fini - hpd tear down callback.
610 *
611 * @rdev: radeon_device pointer
612 *
613 * Tear down the hpd pins used by the card (r1xx-r4xx).
614 * Disable the hpd interrupts.
615 */
616void r100_hpd_fini(struct radeon_device *rdev)
617{
618 struct drm_device *dev = rdev->ddev;
619 struct drm_connector *connector;
620 unsigned disable = 0;
621
622 list_for_each_entry(connector, &dev->mode_config.connector_list, head)for (connector = ({ const __typeof( ((__typeof(*connector) *)
0)->head ) *__mptr = ((&dev->mode_config.connector_list
)->next); (__typeof(*connector) *)( (char *)__mptr - __builtin_offsetof
(__typeof(*connector), head) );}); &connector->head !=
(&dev->mode_config.connector_list); connector = ({ const
__typeof( ((__typeof(*connector) *)0)->head ) *__mptr = (
connector->head.next); (__typeof(*connector) *)( (char *)__mptr
- __builtin_offsetof(__typeof(*connector), head) );}))
{
623 struct radeon_connector *radeon_connector = to_radeon_connector(connector)({ const __typeof( ((struct radeon_connector *)0)->base ) *
__mptr = (connector); (struct radeon_connector *)( (char *)__mptr
- __builtin_offsetof(struct radeon_connector, base) );})
;
624 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
625 disable |= 1 << radeon_connector->hpd.hpd;
626 }
627 radeon_irq_kms_disable_hpd(rdev, disable);
628}
629
630/*
631 * PCI GART
632 */
633void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
634{
635 /* TODO: can we do something here ? */
636 /* The hw seems to cache only one entry, so we should discard this
637 * entry; otherwise, if the first GPU GART read hits it, it could
638 * end up at the wrong address. */
639}
640
641int r100_pci_gart_init(struct radeon_device *rdev)
642{
643 int r;
644
645 if (rdev->gart.ptr) {
646 WARN(1, "R100 PCI GART already initialized\n")({ int __ret = !!(1); if (__ret) printf("R100 PCI GART already initialized\n"
); __builtin_expect(!!(__ret), 0); })
;
647 return 0;
648 }
649 /* Initialize common gart structure */
650 r = radeon_gart_init(rdev);
651 if (r)
652 return r;
653 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
654 rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
655 rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
656 rdev->asic->gart.set_page = &r100_pci_gart_set_page;
657 return radeon_gart_table_ram_alloc(rdev);
658}
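The table_size computation reflects the entry layout: each PCI GART PTE is a single 32-bit word (see r100_pci_gart_set_page() below), hence the factor of 4. A worked example, assuming a hypothetical 64 MB GTT with 4 KB GPU pages:

    /* hypothetical 64 MB GTT, RADEON_GPU_PAGE_SIZE = 4096 */
    unsigned num_gpu_pages = (64u << 20) / 4096;  /* 16384 entries */
    unsigned table_size = num_gpu_pages * 4;      /* 65536 bytes = 64 KB of PTEs */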
659
660int r100_pci_gart_enable(struct radeon_device *rdev)
661{
662 uint32_t tmp;
663
664 /* discard memory request outside of configured range */
665 tmp = RREG32(RADEON_AIC_CNTL)r100_mm_rreg(rdev, (0x01d0), 0) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS(1 << 1);
666 WREG32(RADEON_AIC_CNTL, tmp)r100_mm_wreg(rdev, (0x01d0), (tmp), 0);
667 /* set address range for PCI address translate */
668 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start)r100_mm_wreg(rdev, (0x01dc), (rdev->mc.gtt_start), 0);
669 WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end)r100_mm_wreg(rdev, (0x01e0), (rdev->mc.gtt_end), 0);
670 /* set PCI GART page-table base address */
671 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr)r100_mm_wreg(rdev, (0x01d8), (rdev->gart.table_addr), 0);
672 tmp = RREG32(RADEON_AIC_CNTL)r100_mm_rreg(rdev, (0x01d0), 0) | RADEON_PCIGART_TRANSLATE_EN(1 << 0);
673 WREG32(RADEON_AIC_CNTL, tmp)r100_mm_wreg(rdev, (0x01d0), (tmp), 0);
674 r100_pci_gart_tlb_flush(rdev);
675 DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
676 (unsigned)(rdev->mc.gtt_size >> 20),
677 (unsigned long long)rdev->gart.table_addr);
678 rdev->gart.ready = true1;
679 return 0;
680}
681
682void r100_pci_gart_disable(struct radeon_device *rdev)
683{
684 uint32_t tmp;
685
686 /* discard memory request outside of configured range */
687 tmp = RREG32(RADEON_AIC_CNTL)r100_mm_rreg(rdev, (0x01d0), 0) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS(1 << 1);
688 WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN)r100_mm_wreg(rdev, (0x01d0), (tmp & ~(1 << 0)), 0);
689 WREG32(RADEON_AIC_LO_ADDR, 0)r100_mm_wreg(rdev, (0x01dc), (0), 0);
690 WREG32(RADEON_AIC_HI_ADDR, 0)r100_mm_wreg(rdev, (0x01e0), (0), 0);
691}
692
693uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
694{
695 return addr;
696}
697
698void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
699 uint64_t entry)
700{
701 u32 *gtt = rdev->gart.ptr;
702 gtt[i] = cpu_to_le32(lower_32_bits(entry))((__uint32_t)(((u32)(entry))));
703}
704
705void r100_pci_gart_fini(struct radeon_device *rdev)
706{
707 radeon_gart_fini(rdev);
708 r100_pci_gart_disable(rdev);
709 radeon_gart_table_ram_free(rdev);
710}
711
712int r100_irq_set(struct radeon_device *rdev)
713{
714 uint32_t tmp = 0;
715
716 if (!rdev->irq.installed) {
260: Assuming field 'installed' is false
261: Taking true branch
717 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n")({ int __ret = !!(1); if (__ret) printf("Can't enable IRQ/MSI because no handler is installed\n"
); __builtin_expect(!!(__ret), 0); })
;
262: Taking true branch
718 WREG32(R_000040_GEN_INT_CNTL, 0)r100_mm_wreg(rdev, (0x000040), (0), 0);
263: Calling 'r100_mm_wreg'
267: Returning from 'r100_mm_wreg'
719 return -EINVAL22;
268: Returning without writing to 'rdev->me_fw'
720 }
721 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])({ typeof(*(&rdev->irq.ring_int[0])) __tmp = *(volatile
typeof(*(&rdev->irq.ring_int[0])) *)&(*(&rdev
->irq.ring_int[0])); membar_datadep_consumer(); __tmp; })
) {
722 tmp |= RADEON_SW_INT_ENABLE(1 << 25);
723 }
724 if (rdev->irq.crtc_vblank_int[0] ||
725 atomic_read(&rdev->irq.pflip[0])({ typeof(*(&rdev->irq.pflip[0])) __tmp = *(volatile typeof
(*(&rdev->irq.pflip[0])) *)&(*(&rdev->irq.pflip
[0])); membar_datadep_consumer(); __tmp; })
) {
726 tmp |= RADEON_CRTC_VBLANK_MASK(1 << 0);
727 }
728 if (rdev->irq.crtc_vblank_int[1] ||
729 atomic_read(&rdev->irq.pflip[1])({ typeof(*(&rdev->irq.pflip[1])) __tmp = *(volatile typeof
(*(&rdev->irq.pflip[1])) *)&(*(&rdev->irq.pflip
[1])); membar_datadep_consumer(); __tmp; })
) {
730 tmp |= RADEON_CRTC2_VBLANK_MASK(1 << 9);
731 }
732 if (rdev->irq.hpd[0]) {
733 tmp |= RADEON_FP_DETECT_MASK(1 << 4);
734 }
735 if (rdev->irq.hpd[1]) {
736 tmp |= RADEON_FP2_DETECT_MASK(1 << 10);
737 }
738 WREG32(RADEON_GEN_INT_CNTL, tmp)r100_mm_wreg(rdev, (0x0040), (tmp), 0);
739
740 /* read back to post the write */
741 RREG32(RADEON_GEN_INT_CNTL)r100_mm_rreg(rdev, (0x0040), 0);
742
743 return 0;
744}
745
746void r100_irq_disable(struct radeon_device *rdev)
747{
748 u32 tmp;
749
750 WREG32(R_000040_GEN_INT_CNTL, 0)r100_mm_wreg(rdev, (0x000040), (0), 0);
751 /* Wait and acknowledge irq */
752 mdelay(1);
753 tmp = RREG32(R_000044_GEN_INT_STATUS)r100_mm_rreg(rdev, (0x000044), 0);
754 WREG32(R_000044_GEN_INT_STATUS, tmp)r100_mm_wreg(rdev, (0x000044), (tmp), 0);
755}
756
757static uint32_t r100_irq_ack(struct radeon_device *rdev)
758{
759 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS)r100_mm_rreg(rdev, (0x0044), 0);
760 uint32_t irq_mask = RADEON_SW_INT_TEST(1 << 25) |
761 RADEON_CRTC_VBLANK_STAT(1 << 0) | RADEON_CRTC2_VBLANK_STAT(1 << 9) |
762 RADEON_FP_DETECT_STAT(1 << 4) | RADEON_FP2_DETECT_STAT(1 << 10);
763
764 if (irqs) {
765 WREG32(RADEON_GEN_INT_STATUS, irqs)r100_mm_wreg(rdev, (0x0044), (irqs), 0);
766 }
767 return irqs & irq_mask;
768}
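GEN_INT_STATUS follows the write-back-to-acknowledge convention: writing back the bits just read clears them in hardware, so a single call both samples and acks. That is what lets r100_irq_process() below loop on this function to catch interrupts that arrive while earlier ones are being serviced; the consumption pattern, sketched:

    uint32_t status = r100_irq_ack(rdev);   /* sample + acknowledge */

    while (status) {
        /* ... dispatch the bits set in status ... */
        status = r100_irq_ack(rdev);        /* pick up late arrivals */
    }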
769
770int r100_irq_process(struct radeon_device *rdev)
771{
772 uint32_t status, msi_rearm;
773 bool_Bool queue_hotplug = false0;
774
775 status = r100_irq_ack(rdev);
776 if (!status) {
777 return IRQ_NONE;
778 }
779 if (rdev->shutdown) {
780 return IRQ_NONE;
781 }
782 while (status) {
783 /* SW interrupt */
784 if (status & RADEON_SW_INT_TEST(1 << 25)) {
785 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX0);
786 }
787 /* Vertical blank interrupts */
788 if (status & RADEON_CRTC_VBLANK_STAT(1 << 0)) {
789 if (rdev->irq.crtc_vblank_int[0]) {
790 drm_handle_vblank(rdev->ddev, 0);
791 rdev->pm.vblank_sync = true1;
792 wake_up(&rdev->irq.vblank_queue);
793 }
794 if (atomic_read(&rdev->irq.pflip[0])({ typeof(*(&rdev->irq.pflip[0])) __tmp = *(volatile typeof
(*(&rdev->irq.pflip[0])) *)&(*(&rdev->irq.pflip
[0])); membar_datadep_consumer(); __tmp; })
)
795 radeon_crtc_handle_vblank(rdev, 0);
796 }
797 if (status & RADEON_CRTC2_VBLANK_STAT(1 << 9)) {
798 if (rdev->irq.crtc_vblank_int[1]) {
799 drm_handle_vblank(rdev->ddev, 1);
800 rdev->pm.vblank_sync = true1;
801 wake_up(&rdev->irq.vblank_queue);
802 }
803 if (atomic_read(&rdev->irq.pflip[1])({ typeof(*(&rdev->irq.pflip[1])) __tmp = *(volatile typeof
(*(&rdev->irq.pflip[1])) *)&(*(&rdev->irq.pflip
[1])); membar_datadep_consumer(); __tmp; })
)
804 radeon_crtc_handle_vblank(rdev, 1);
805 }
806 if (status & RADEON_FP_DETECT_STAT(1 << 4)) {
807 queue_hotplug = true1;
808 DRM_DEBUG("HPD1\n")__drm_dbg(DRM_UT_CORE, "HPD1\n");
809 }
810 if (status & RADEON_FP2_DETECT_STAT(1 << 10)) {
811 queue_hotplug = true1;
812 DRM_DEBUG("HPD2\n")__drm_dbg(DRM_UT_CORE, "HPD2\n");
813 }
814 status = r100_irq_ack(rdev);
815 }
816 if (queue_hotplug)
817 schedule_delayed_work(&rdev->hotplug_work, 0);
818 if (rdev->msi_enabled) {
819 switch (rdev->family) {
820 case CHIP_RS400:
821 case CHIP_RS480:
822 msi_rearm = RREG32(RADEON_AIC_CNTL)r100_mm_rreg(rdev, (0x01d0), 0) & ~RS400_MSI_REARM(1 << 3);
823 WREG32(RADEON_AIC_CNTL, msi_rearm)r100_mm_wreg(rdev, (0x01d0), (msi_rearm), 0);
824 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM)r100_mm_wreg(rdev, (0x01d0), (msi_rearm | (1 << 3)), 0);
825 break;
826 default:
827 WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN)r100_mm_wreg(rdev, (0x0160), ((1 << 0)), 0);
828 break;
829 }
830 }
831 return IRQ_HANDLED;
832}
833
834u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
835{
836 if (crtc == 0)
837 return RREG32(RADEON_CRTC_CRNT_FRAME)r100_mm_rreg(rdev, (0x0214), 0);
838 else
839 return RREG32(RADEON_CRTC2_CRNT_FRAME)r100_mm_rreg(rdev, (0x0314), 0);
840}
841
842/**
843 * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
844 * @rdev: radeon device structure
845 * @ring: ring buffer struct for emitting packets
846 */
847static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
848{
849 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)(0x00000000 | ((((0x0130) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
);
850 radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
851 RADEON_HDP_READ_BUFFER_INVALIDATE(1 << 27));
852 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)(0x00000000 | ((((0x0130) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
);
853 radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
854}
855
856/* Whoever calls radeon_fence_emit should call ring_lock and ask
 857 * for enough space (today the callers are ib schedule and buffer move) */
858void r100_fence_ring_emit(struct radeon_device *rdev,
859 struct radeon_fence *fence)
860{
861 struct radeon_ring *ring = &rdev->ring[fence->ring];
862
863 /* We have to make sure that caches are flushed before
864 * CPU might read something from VRAM. */
865 radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)(0x00000000 | ((((0x325C) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
);
866 radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL0xf);
867 radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)(0x00000000 | ((((0x3254) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
);
868 radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL0x5);
869 /* Wait until IDLE & CLEAN */
870 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)(0x00000000 | ((((0x1720) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
);
871 radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN(1 << 16) | RADEON_WAIT_3D_IDLECLEAN(1 << 17));
872 r100_ring_hdp_flush(rdev, ring);
873 /* Emit fence sequence & fire IRQ */
874 radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)(0x00000000 | ((((rdev->fence_drv[fence->ring].scratch_reg
) >> 2) << 0) & (0x1ffff << 0)) | ((((0
)) << 16) & (0x3fff << 16)))
);
875 radeon_ring_write(ring, fence->seq);
876 radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0)(0x00000000 | ((((0x0044) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
);
877 radeon_ring_write(ring, RADEON_SW_INT_FIRE(1 << 26));
878}
879
880bool_Bool r100_semaphore_ring_emit(struct radeon_device *rdev,
881 struct radeon_ring *ring,
882 struct radeon_semaphore *semaphore,
883 bool_Bool emit_wait)
884{
885 /* Unused on older asics, since we don't have semaphores or multiple rings */
886 BUG()do { panic("BUG at %s:%d", "/usr/src/sys/dev/pci/drm/radeon/r100.c"
, 886); } while (0)
;
887 return false0;
888}
889
890struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
891 uint64_t src_offset,
892 uint64_t dst_offset,
893 unsigned num_gpu_pages,
894 struct dma_resv *resv)
895{
896 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0];
897 struct radeon_fence *fence;
898 uint32_t cur_pages;
899 uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE4096;
900 uint32_t pitch;
901 uint32_t stride_pixels;
902 unsigned ndw;
903 int num_loops;
904 int r = 0;
905
906 /* radeon limited to 16k stride */
907 stride_bytes &= 0x3fff;
908 /* radeon pitch is /64 */
909 pitch = stride_bytes / 64;
910 stride_pixels = stride_bytes / 4;
911 num_loops = DIV_ROUND_UP(num_gpu_pages, 8191)(((num_gpu_pages) + ((8191) - 1)) / (8191));
912
913 /* Ask for enough room for blit + flush + fence */
914 ndw = 64 + (10 * num_loops);
915 r = radeon_ring_lock(rdev, ring, ndw);
916 if (r) {
917 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw)__drm_err("radeon: moving bo (%d) asking for %u dw.\n", r, ndw
)
;
918 return ERR_PTR(-EINVAL22);
919 }
920 while (num_gpu_pages > 0) {
921 cur_pages = num_gpu_pages;
922 if (cur_pages > 8191) {
923 cur_pages = 8191;
924 }
925 num_gpu_pages -= cur_pages;
926
927 /* pages are in Y direction - height
928 page width in X direction - width */
929 radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8)(0xC0000000 | ((((0x9B)) << 8) & (0xff << 8))
| ((((8)) << 16) & (0x3fff << 16)))
);
930 radeon_ring_write(ring,
931 RADEON_GMC_SRC_PITCH_OFFSET_CNTL(1 << 0) |
932 RADEON_GMC_DST_PITCH_OFFSET_CNTL(1 << 1) |
933 RADEON_GMC_SRC_CLIPPING(1 << 2) |
934 RADEON_GMC_DST_CLIPPING(1 << 3) |
935 RADEON_GMC_BRUSH_NONE(15 << 4) |
936 (RADEON_COLOR_FORMAT_ARGB88886 << 8) |
937 RADEON_GMC_SRC_DATATYPE_COLOR(3 << 12) |
938 RADEON_ROP3_S0x00cc0000 |
939 RADEON_DP_SRC_SOURCE_MEMORY(2 << 24) |
940 RADEON_GMC_CLR_CMP_CNTL_DIS(1 << 28) |
941 RADEON_GMC_WR_MSK_DIS(1 << 30));
942 radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
943 radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
944 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
945 radeon_ring_write(ring, 0);
946 radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
947 radeon_ring_write(ring, num_gpu_pages);
948 radeon_ring_write(ring, num_gpu_pages);
949 radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
950 }
951 radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)(0x00000000 | ((((0x1714) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
);
952 radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL0xf);
953 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)(0x00000000 | ((((0x1720) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
);
954 radeon_ring_write(ring,
955 RADEON_WAIT_2D_IDLECLEAN(1 << 16) |
956 RADEON_WAIT_HOST_IDLECLEAN(1 << 18) |
957 RADEON_WAIT_DMA_GUI_IDLE(1 << 9));
958 r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX0);
959 if (r) {
960 radeon_ring_unlock_undo(rdev, ring);
961 return ERR_PTR(r);
962 }
963 radeon_ring_unlock_commit(rdev, ring, false0);
964 return fence;
965}
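A worked example of the ring-space reservation above, assuming a hypothetical 16 MB copy: each BITBLT_MULTI packet moves at most 8191 GPU pages, each loop iteration emits 10 dwords, and 64 dwords of headroom cover the trailing flush and fence:

    /* hypothetical 16 MB copy, 4 KB GPU pages */
    unsigned num_gpu_pages = (16u << 20) / 4096;        /* 4096 pages */
    int num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);  /* = 1 blit packet */
    unsigned ndw = 64 + (10 * num_loops);               /* = 74 dwords requested */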
966
967static int r100_cp_wait_for_idle(struct radeon_device *rdev)
968{
969 unsigned i;
970 u32 tmp;
971
972 for (i = 0; i < rdev->usec_timeout; i++) {
973 tmp = RREG32(R_000E40_RBBM_STATUS)r100_mm_rreg(rdev, (0x000E40), 0);
974 if (!G_000E40_CP_CMDSTRM_BUSY(tmp)(((tmp) >> 16) & 0x1)) {
975 return 0;
976 }
977 udelay(1);
978 }
979 return -1;
980}
981
982void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
983{
984 int r;
985
986 r = radeon_ring_lock(rdev, ring, 2);
987 if (r) {
988 return;
989 }
990 radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0)(0x00000000 | ((((0x1724) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
);
991 radeon_ring_write(ring,
992 RADEON_ISYNC_ANY2D_IDLE3D(1 << 0) |
993 RADEON_ISYNC_ANY3D_IDLE2D(1 << 1) |
994 RADEON_ISYNC_WAIT_IDLEGUI(1 << 4) |
995 RADEON_ISYNC_CPSCRATCH_IDLEGUI(1 << 5));
996 radeon_ring_unlock_commit(rdev, ring, false0);
997}
998
999
1000/* Load the microcode for the CP */
1001static int r100_cp_init_microcode(struct radeon_device *rdev)
1002{
1003 const char *fw_name = NULL((void *)0);
1004 int err;
1005
1006 DRM_DEBUG_KMS("\n")__drm_dbg(DRM_UT_KMS, "\n");
1007
1008 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
282: Assuming field 'family' is not equal to CHIP_R100
283: Assuming field 'family' is not equal to CHIP_RV100
287: Taking false branch
1009 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
284: Assuming field 'family' is not equal to CHIP_RV200
285: Assuming field 'family' is not equal to CHIP_RS100
1010 (rdev->family == CHIP_RS200)) {
286: Assuming field 'family' is not equal to CHIP_RS200
1011 DRM_INFO("Loading R100 Microcode\n")printk("\0016" "[" "drm" "] " "Loading R100 Microcode\n");
1012 fw_name = FIRMWARE_R100"radeon/R100_cp.bin";
1013 } else if ((rdev->family == CHIP_R200) ||
288: Assuming field 'family' is not equal to CHIP_R200
292: Taking false branch
1014 (rdev->family == CHIP_RV250) ||
289: Assuming field 'family' is not equal to CHIP_RV250
1015 (rdev->family == CHIP_RV280) ||
290: Assuming field 'family' is not equal to CHIP_RV280
1016 (rdev->family == CHIP_RS300)) {
291: Assuming field 'family' is not equal to CHIP_RS300
1017 DRM_INFO("Loading R200 Microcode\n")printk("\0016" "[" "drm" "] " "Loading R200 Microcode\n");
1018 fw_name = FIRMWARE_R200"radeon/R200_cp.bin";
1019 } else if ((rdev->family == CHIP_R300) ||
293: Assuming field 'family' is not equal to CHIP_R300
299: Taking false branch
1020 (rdev->family == CHIP_R350) ||
294: Assuming field 'family' is not equal to CHIP_R350
1021 (rdev->family == CHIP_RV350) ||
295: Assuming field 'family' is not equal to CHIP_RV350
1022 (rdev->family == CHIP_RV380) ||
296: Assuming field 'family' is not equal to CHIP_RV380
1023 (rdev->family == CHIP_RS400) ||
297: Assuming field 'family' is not equal to CHIP_RS400
1024 (rdev->family == CHIP_RS480)) {
298: Assuming field 'family' is not equal to CHIP_RS480
1025 DRM_INFO("Loading R300 Microcode\n")printk("\0016" "[" "drm" "] " "Loading R300 Microcode\n");
1026 fw_name = FIRMWARE_R300"radeon/R300_cp.bin";
1027 } else if ((rdev->family == CHIP_R420) ||
300: Assuming field 'family' is not equal to CHIP_R420
303: Taking false branch
1028 (rdev->family == CHIP_R423) ||
301: Assuming field 'family' is not equal to CHIP_R423
1029 (rdev->family == CHIP_RV410)) {
302: Assuming field 'family' is not equal to CHIP_RV410
1030 DRM_INFO("Loading R400 Microcode\n")printk("\0016" "[" "drm" "] " "Loading R400 Microcode\n");
1031 fw_name = FIRMWARE_R420"radeon/R420_cp.bin";
1032 } else if ((rdev->family == CHIP_RS690) ||
304: Assuming field 'family' is equal to CHIP_RS690
1033 (rdev->family == CHIP_RS740)) {
1034 DRM_INFO("Loading RS690/RS740 Microcode\n")printk("\0016" "[" "drm" "] " "Loading RS690/RS740 Microcode\n"
)
;
1035 fw_name = FIRMWARE_RS690"radeon/RS690_cp.bin";
1036 } else if (rdev->family == CHIP_RS600) {
1037 DRM_INFO("Loading RS600 Microcode\n")printk("\0016" "[" "drm" "] " "Loading RS600 Microcode\n");
1038 fw_name = FIRMWARE_RS600"radeon/RS600_cp.bin";
1039 } else if ((rdev->family == CHIP_RV515) ||
1040 (rdev->family == CHIP_R520) ||
1041 (rdev->family == CHIP_RV530) ||
1042 (rdev->family == CHIP_R580) ||
1043 (rdev->family == CHIP_RV560) ||
1044 (rdev->family == CHIP_RV570)) {
1045 DRM_INFO("Loading R500 Microcode\n")printk("\0016" "[" "drm" "] " "Loading R500 Microcode\n");
1046 fw_name = FIRMWARE_R520"radeon/R520_cp.bin";
1047 }
1048
1049 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
305: Calling 'request_firmware'
309: Returning from 'request_firmware'
1050 if (err) {
310: Assuming 'err' is 0
311: Taking false branch
1051 pr_err("radeon_cp: Failed to load firmware \"%s\"\n", fw_name)printk("\0013" "radeon_cp: Failed to load firmware \"%s\"\n",
fw_name)
;
1052 } else if (rdev->me_fw->size % 8) {
312: Access to field 'size' results in a dereference of a null pointer (loaded from field 'me_fw')
1053 pr_err("radeon_cp: Bogus length %zu in firmware \"%s\"\n",
1054 rdev->me_fw->size, fw_name);
1055 err = -EINVAL22;
1056 release_firmware(rdev->me_fw);
1057 rdev->me_fw = NULL((void *)0);
1058 }
1059 return err;
1060}
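On the path above, request_firmware() is modeled as succeeding (step 310: 'err' is 0) while me_fw may still be NULL, which is what trips the warning at line 1052. If that model is right, a defensive rewrite of this function's tail would guard the pointer before touching size; a minimal sketch, ours rather than upstream's actual change. Note too that fw_name remains NULL when no family branch matches, which is worth guarding as well:

    if (fw_name == NULL)
        return -EINVAL;   /* no microcode known for this family */

    err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
    if (err) {
        pr_err("radeon_cp: Failed to load firmware \"%s\"\n", fw_name);
    } else if (rdev->me_fw == NULL) {
        err = -EINVAL;    /* "success" with no firmware object: the analyzer's path */
    } else if (rdev->me_fw->size % 8) {
        pr_err("radeon_cp: Bogus length %zu in firmware \"%s\"\n",
               rdev->me_fw->size, fw_name);
        err = -EINVAL;
        release_firmware(rdev->me_fw);
        rdev->me_fw = NULL;
    }
    return err;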
1061
1062u32 r100_gfx_get_rptr(struct radeon_device *rdev,
1063 struct radeon_ring *ring)
1064{
1065 u32 rptr;
1066
1067 if (rdev->wb.enabled)
1068 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4])((__uint32_t)(rdev->wb.wb[ring->rptr_offs/4]));
1069 else
1070 rptr = RREG32(RADEON_CP_RB_RPTR)r100_mm_rreg(rdev, (0x0710), 0);
1071
1072 return rptr;
1073}
1074
1075u32 r100_gfx_get_wptr(struct radeon_device *rdev,
1076 struct radeon_ring *ring)
1077{
1078 return RREG32(RADEON_CP_RB_WPTR)r100_mm_rreg(rdev, (0x0714), 0);
1079}
1080
1081void r100_gfx_set_wptr(struct radeon_device *rdev,
1082 struct radeon_ring *ring)
1083{
1084 WREG32(RADEON_CP_RB_WPTR, ring->wptr)r100_mm_wreg(rdev, (0x0714), (ring->wptr), 0);
1085 (void)RREG32(RADEON_CP_RB_WPTR)r100_mm_rreg(rdev, (0x0714), 0);
1086}
1087
1088static void r100_cp_load_microcode(struct radeon_device *rdev)
1089{
1090 const __be32 *fw_data;
1091 int i, size;
1092
1093 if (r100_gui_wait_for_idle(rdev)) {
1094 pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n")printk("\0014" "Failed to wait GUI idle while programming pipes. Bad things might happen.\n"
)
;
1095 }
1096
1097 if (rdev->me_fw) {
1098 size = rdev->me_fw->size / 4;
1099 fw_data = (const __be32 *)&rdev->me_fw->data[0];
1100 WREG32(RADEON_CP_ME_RAM_ADDR, 0)r100_mm_wreg(rdev, (0x07d4), (0), 0);
1101 for (i = 0; i < size; i += 2) {
1102 WREG32(RADEON_CP_ME_RAM_DATAH,
1103 be32_to_cpup(&fw_data[i]));
1104 WREG32(RADEON_CP_ME_RAM_DATAL,
1105 be32_to_cpup(&fw_data[i + 1]));
1106 }
1107 }
1108}
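This upload loop is why r100_cp_init_microcode() validates size % 8 at line 1052: each iteration consumes two big-endian 32-bit words (8 bytes) as a DATAH/DATAL register pair, so an image whose byte count is not a multiple of 8 would be read past its end. The arithmetic, for a hypothetical 2048-byte image:

    int size = 2048 / 4;   /* 512 big-endian dwords */
    /* the i += 2 stride gives 256 DATAH/DATAL pair writes,
     * consuming 256 * 8 = 2048 bytes; exactly the image */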
1109
1110int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1111{
1112 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0];
1113 unsigned rb_bufsz;
1114 unsigned rb_blksz;
1115 unsigned max_fetch;
1116 unsigned pre_write_timer;
1117 unsigned pre_write_limit;
1118 unsigned indirect2_start;
1119 unsigned indirect1_start;
1120 uint32_t tmp;
1121 int r;
1122
1123 if (r100_debugfs_cp_init(rdev)) {
275
Calling 'r100_debugfs_cp_init'
277
Returning from 'r100_debugfs_cp_init'
278
Taking false branch
1124 DRM_ERROR("Failed to register debugfs file for CP !\n")__drm_err("Failed to register debugfs file for CP !\n");
1125 }
1126 if (!rdev->me_fw) {
279
Assuming field 'me_fw' is null
280
Taking true branch
1127 r = r100_cp_init_microcode(rdev);
281
Calling 'r100_cp_init_microcode'
1128 if (r) {
1129 DRM_ERROR("Failed to load firmware!\n")__drm_err("Failed to load firmware!\n");
1130 return r;
1131 }
1132 }
1133
1134 /* Align ring size */
1135 rb_bufsz = order_base_2(ring_size / 8)drm_order(ring_size / 8);
1136 ring_size = (1 << (rb_bufsz + 1)) * 4;
1137 r100_cp_load_microcode(rdev);
1138 r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET1024,
1139 RADEON_CP_PACKET20x80000000);
1140 if (r) {
1141 return r;
1142 }
1143 /* Each time the cp reads 1024 bytes (16 dword/quadword) update
1144 * the rptr copy in system ram */
1145 rb_blksz = 9;
1146 /* cp will read 128 bytes at a time (4 dwords) */
1147 max_fetch = 1;
1148 ring->align_mask = 16 - 1;
1149 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
1150 pre_write_timer = 64;
1151 /* Force CP_RB_WPTR write if written more than one time before the
1152 * delay expire
1153 */
1154 pre_write_limit = 0;
1155 /* Setup the cp cache like this (cache size is 96 dwords) :
1156 * RING 0 to 15
1157 * INDIRECT1 16 to 79
1158 * INDIRECT2 80 to 95
1159 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
1160 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
1161 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
1162 * Idea being that most of the gpu cmd will be through indirect1 buffer
1163 * so it gets the bigger cache.
1164 */
1165 indirect2_start = 80;
1166 indirect1_start = 16;
1167 /* cp setup */
1168 WREG32(0x718, pre_write_timer | (pre_write_limit << 28))r100_mm_wreg(rdev, (0x718), (pre_write_timer | (pre_write_limit
<< 28)), 0)
;
1169 tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz)(((rb_bufsz) << 0) & (0x3f << 0)) |
1170 REG_SET(RADEON_RB_BLKSZ, rb_blksz)(((rb_blksz) << 8) & (0x3f << 8)) |
1171 REG_SET(RADEON_MAX_FETCH, max_fetch)(((max_fetch) << 18) & (0x3 << 18)));
1172#ifdef __BIG_ENDIAN
1173 tmp |= RADEON_BUF_SWAP_32BIT(2 << 16);
1174#endif
1175 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE)r100_mm_wreg(rdev, (0x0704), (tmp | (1 << 27)), 0);
1176
1177 /* Set ring address */
1178 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr)printk("\0016" "[" "drm" "] " "radeon: ring at 0x%016lX\n", (
unsigned long)ring->gpu_addr)
;
1179 WREG32(RADEON_CP_RB_BASE, ring->gpu_addr)r100_mm_wreg(rdev, (0x0700), (ring->gpu_addr), 0);
1180 /* Force read & write ptr to 0 */
1181 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE)r100_mm_wreg(rdev, (0x0704), (tmp | (1 << 31) | (1 <<
27)), 0)
;
1182 WREG32(RADEON_CP_RB_RPTR_WR, 0)r100_mm_wreg(rdev, (0x071c), (0), 0);
1183 ring->wptr = 0;
1184 WREG32(RADEON_CP_RB_WPTR, ring->wptr)r100_mm_wreg(rdev, (0x0714), (ring->wptr), 0);
1185
1186 /* set the wb address whether it's enabled or not */
1187 WREG32(R_00070C_CP_RB_RPTR_ADDR,r100_mm_wreg(rdev, (0x00070C), (((((rdev->wb.gpu_addr + 1024
) >> 2) & 0x3FFFFFFF) << 2)), 0)
1188 S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2))r100_mm_wreg(rdev, (0x00070C), (((((rdev->wb.gpu_addr + 1024
) >> 2) & 0x3FFFFFFF) << 2)), 0)
;
1189 WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET)r100_mm_wreg(rdev, (0x000774), (rdev->wb.gpu_addr + 0), 0);
1190
1191 if (rdev->wb.enabled)
1192 WREG32(R_000770_SCRATCH_UMSK, 0xff)r100_mm_wreg(rdev, (0x000770), (0xff), 0);
1193 else {
1194 tmp |= RADEON_RB_NO_UPDATE(1 << 27);
1195 WREG32(R_000770_SCRATCH_UMSK, 0)r100_mm_wreg(rdev, (0x000770), (0), 0);
1196 }
1197
1198 WREG32(RADEON_CP_RB_CNTL, tmp)r100_mm_wreg(rdev, (0x0704), (tmp), 0);
1199 udelay(10);
1200 /* Set cp mode to bus mastering & enable cp */
1201 WREG32(RADEON_CP_CSQ_MODE,r100_mm_wreg(rdev, (0x0744), ((((indirect2_start) << 0)
& (0x7f << 0)) | (((indirect1_start) << 8) &
(0x7f << 8))), 0)
1202 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |r100_mm_wreg(rdev, (0x0744), ((((indirect2_start) << 0)
& (0x7f << 0)) | (((indirect1_start) << 8) &
(0x7f << 8))), 0)
1203 REG_SET(RADEON_INDIRECT1_START, indirect1_start))r100_mm_wreg(rdev, (0x0744), ((((indirect2_start) << 0)
& (0x7f << 0)) | (((indirect1_start) << 8) &
(0x7f << 8))), 0)
;
1204 WREG32(RADEON_CP_RB_WPTR_DELAY, 0)r100_mm_wreg(rdev, (0x0718), (0), 0);
1205 WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D)r100_mm_wreg(rdev, (0x0744), (0x00004D4D), 0);
1206 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM)r100_mm_wreg(rdev, (0x0740), ((4 << 28)), 0);
1207
1208 /* at this point everything should be setup correctly to enable master */
1209 pci_set_master(rdev->pdev);
1210
1211 radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX])(rdev)->asic->ring[(0)]->ring_start((rdev), (&rdev
->ring[0]))
;
1212 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring)(rdev)->asic->ring[(0)]->ring_test((rdev), (ring));
1213 if (r) {
1214 DRM_ERROR("radeon: cp isn't working (%d).\n", r)__drm_err("radeon: cp isn't working (%d).\n", r);
1215 return r;
1216 }
1217 ring->ready = true1;
1218 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1219
1220 if (!ring->rptr_save_reg /* not resuming from suspend */
1221 && radeon_ring_supports_scratch_reg(rdev, ring)) {
1222 r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
1223 if (r) {
1224 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r)__drm_err("failed to get scratch reg for rptr save (%d).\n", r
)
;
1225 ring->rptr_save_reg = 0;
1226 }
1227 }
1228 return 0;
1229}
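
[Editor's note] On the ring sizing at lines 1135-1136: order_base_2() (expanded here to drm_order()) yields the power-of-two exponent of ring_size / 8 rounded up, and the next line re-expands it, so the final ring size is a power of two no smaller than the request. A worked example, assuming round-up behaviour:

	unsigned ring_size = 100000;                      /* requested bytes */
	unsigned rb_bufsz = order_base_2(ring_size / 8);  /* order_base_2(12500) == 14 */
	ring_size = (1 << (rb_bufsz + 1)) * 4;            /* (1 << 15) * 4 == 131072 */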
1230
1231void r100_cp_fini(struct radeon_device *rdev)
1232{
1233 if (r100_cp_wait_for_idle(rdev)) {
1234 DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n")__drm_err("Wait for CP idle timeout, shutting down CP.\n");
1235 }
1236 /* Disable ring */
1237 r100_cp_disable(rdev);
1238 radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX0].rptr_save_reg);
1239 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0]);
1240 DRM_INFO("radeon: cp finalized\n")printk("\0016" "[" "drm" "] " "radeon: cp finalized\n");
1241}
1242
1243void r100_cp_disable(struct radeon_device *rdev)
1244{
1245 /* Disable ring */
1246 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1247 rdev->ring[RADEON_RING_TYPE_GFX_INDEX0].ready = false0;
1248 WREG32(RADEON_CP_CSQ_MODE, 0)r100_mm_wreg(rdev, (0x0744), (0), 0);
1249 WREG32(RADEON_CP_CSQ_CNTL, 0)r100_mm_wreg(rdev, (0x0740), (0), 0);
1250 WREG32(R_000770_SCRATCH_UMSK, 0)r100_mm_wreg(rdev, (0x000770), (0), 0);
1251 if (r100_gui_wait_for_idle(rdev)) {
1252 pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n")printk("\0014" "Failed to wait GUI idle while programming pipes. Bad things might happen.\n"
)
;
1253 }
1254}
1255
1256/*
1257 * CS functions
1258 */
1259int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
1260 struct radeon_cs_packet *pkt,
1261 unsigned idx,
1262 unsigned reg)
1263{
1264 int r;
1265 u32 tile_flags = 0;
1266 u32 tmp;
1267 struct radeon_bo_list *reloc;
1268 u32 value;
1269
1270 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1271 if (r) {
1272 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg)
1273 idx, reg)__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg);
1274 radeon_cs_dump_packet(p, pkt);
1275 return r;
1276 }
1277
1278 value = radeon_get_ib_value(p, idx);
1279 tmp = value & 0x003fffff;
1280 tmp += (((u32)reloc->gpu_offset) >> 10);
1281
1282 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS0x01)) {
1283 if (reloc->tiling_flags & RADEON_TILING_MACRO0x1)
1284 tile_flags |= RADEON_DST_TILE_MACRO(1 << 30);
1285 if (reloc->tiling_flags & RADEON_TILING_MICRO0x2) {
1286 if (reg == RADEON_SRC_PITCH_OFFSET0x1428) {
1287 DRM_ERROR("Cannot src blit from microtiled surface\n")__drm_err("Cannot src blit from microtiled surface\n");
1288 radeon_cs_dump_packet(p, pkt);
1289 return -EINVAL22;
1290 }
1291 tile_flags |= RADEON_DST_TILE_MICRO(2 << 30);
1292 }
1293
1294 tmp |= tile_flags;
1295 p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
1296 } else
1297 p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
1298 return 0;
1299}
1300
1301int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
1302 struct radeon_cs_packet *pkt,
1303 int idx)
1304{
1305 unsigned c, i;
1306 struct radeon_bo_list *reloc;
1307 struct r100_cs_track *track;
1308 int r = 0;
1309 volatile uint32_t *ib;
1310 u32 idx_value;
1311
1312 ib = p->ib.ptr;
1313 track = (struct r100_cs_track *)p->track;
1314 c = radeon_get_ib_value(p, idx++) & 0x1F;
1315 if (c > 16) {
1316 DRM_ERROR("Only 16 vertex buffers are allowed %d\n",__drm_err("Only 16 vertex buffers are allowed %d\n", pkt->
opcode)
1317 pkt->opcode)__drm_err("Only 16 vertex buffers are allowed %d\n", pkt->
opcode)
;
1318 radeon_cs_dump_packet(p, pkt);
1319 return -EINVAL22;
1320 }
1321 track->num_arrays = c;
1322 for (i = 0; i < (c - 1); i+=2, idx+=3) {
1323 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1324 if (r) {
1325 DRM_ERROR("No reloc for packet3 %d\n",__drm_err("No reloc for packet3 %d\n", pkt->opcode)
1326 pkt->opcode)__drm_err("No reloc for packet3 %d\n", pkt->opcode);
1327 radeon_cs_dump_packet(p, pkt);
1328 return r;
1329 }
1330 idx_value = radeon_get_ib_value(p, idx);
1331 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
1332
1333 track->arrays[i + 0].esize = idx_value >> 8;
1334 track->arrays[i + 0].robj = reloc->robj;
1335 track->arrays[i + 0].esize &= 0x7F;
1336 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1337 if (r) {
1338 DRM_ERROR("No reloc for packet3 %d\n",__drm_err("No reloc for packet3 %d\n", pkt->opcode)
1339 pkt->opcode)__drm_err("No reloc for packet3 %d\n", pkt->opcode);
1340 radeon_cs_dump_packet(p, pkt);
1341 return r;
1342 }
1343 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
1344 track->arrays[i + 1].robj = reloc->robj;
1345 track->arrays[i + 1].esize = idx_value >> 24;
1346 track->arrays[i + 1].esize &= 0x7F;
1347 }
1348 if (c & 1) {
1349 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1350 if (r) {
1351 DRM_ERROR("No reloc for packet3 %d\n",__drm_err("No reloc for packet3 %d\n", pkt->opcode)
1352 pkt->opcode)__drm_err("No reloc for packet3 %d\n", pkt->opcode);
1353 radeon_cs_dump_packet(p, pkt);
1354 return r;
1355 }
1356 idx_value = radeon_get_ib_value(p, idx);
1357 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
1358 track->arrays[i + 0].robj = reloc->robj;
1359 track->arrays[i + 0].esize = idx_value >> 8;
1360 track->arrays[i + 0].esize &= 0x7F;
1361 }
1362 return r;
1363}
1364
1365int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1366 struct radeon_cs_packet *pkt,
1367 const unsigned *auth, unsigned n,
1368 radeon_packet0_check_t check)
1369{
1370 unsigned reg;
1371 unsigned i, j, m;
1372 unsigned idx;
1373 int r;
1374
1375 idx = pkt->idx + 1;
1376 reg = pkt->reg;
1377 /* Check that the register falls into the register range
1378 * determined by the number of entries (n) in the
1379 * safe register bitmap.
1380 */
1381 if (pkt->one_reg_wr) {
1382 if ((reg >> 7) > n) {
1383 return -EINVAL22;
1384 }
1385 } else {
1386 if (((reg + (pkt->count << 2)) >> 7) > n) {
1387 return -EINVAL22;
1388 }
1389 }
1390 for (i = 0; i <= pkt->count; i++, idx++) {
1391 j = (reg >> 7);
1392 m = 1 << ((reg >> 2) & 31);
1393 if (auth[j] & m) {
1394 r = check(p, pkt, idx, reg);
1395 if (r) {
1396 return r;
1397 }
1398 }
1399 if (pkt->one_reg_wr) {
1400 if (!(auth[j] & m)) {
1401 break;
1402 }
1403 } else {
1404 reg += 4;
1405 }
1406 }
1407 return 0;
1408}
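
[Editor's note] Worked example for the bitmap lookup above: each 32-bit word of auth[] covers 128 bytes of register space (32 dword registers), hence j = reg >> 7 and bit index (reg >> 2) & 31. For reg == 0x1c40 (RADEON_RB3D_COLOROFFSET):

	j = 0x1c40 >> 7;               /* 0x38: word 56 of the bitmap */
	m = 1 << ((0x1c40 >> 2) & 31); /* bit 16 */
	/* the write is routed through check() iff auth[56] has bit 16 set */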
1409
1410/**
1411 * r100_cs_packet_next_vline() - parse userspace VLINE packet
1412 * @parser: parser structure holding parsing context.
1413 *
1414 * Userspace sends a special sequence for VLINE waits.
1415 * PACKET0 - VLINE_START_END + value
1416 * PACKET0 - WAIT_UNTIL + value
1417 * RELOC (P3) - crtc_id in reloc.
1418 *
1419 * This function parses this and relocates the VLINE START END
1420 * and WAIT UNTIL packets to the correct crtc.
1421 * It also detects a switched off crtc and nulls out the
1422 * wait in that case.
1423 */
1424int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1425{
1426 struct drm_crtc *crtc;
1427 struct radeon_crtc *radeon_crtc;
1428 struct radeon_cs_packet p3reloc, waitreloc;
1429 int crtc_id;
1430 int r;
1431 uint32_t header, h_idx, reg;
1432 volatile uint32_t *ib;
1433
1434 ib = p->ib.ptr;
1435
1436 /* parse the wait until */
1437 r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
1438 if (r)
1439 return r;
1440
1441 /* check it's a wait until and only 1 count */
1442 if (waitreloc.reg != RADEON_WAIT_UNTIL0x1720 ||
1443 waitreloc.count != 0) {
1444 DRM_ERROR("vline wait had illegal wait until segment\n")__drm_err("vline wait had illegal wait until segment\n");
1445 return -EINVAL22;
1446 }
1447
1448 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE(1 << 3)) {
1449 DRM_ERROR("vline wait had illegal wait until\n")__drm_err("vline wait had illegal wait until\n");
1450 return -EINVAL22;
1451 }
1452
1453 /* jump over the NOP */
1454 r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
1455 if (r)
1456 return r;
1457
1458 h_idx = p->idx - 2;
1459 p->idx += waitreloc.count + 2;
1460 p->idx += p3reloc.count + 2;
1461
1462 header = radeon_get_ib_value(p, h_idx);
1463 crtc_id = radeon_get_ib_value(p, h_idx + 5);
1464 reg = R100_CP_PACKET0_GET_REG(header)(((header) & 0x1FFF) << 2);
1465 crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
1466 if (!crtc) {
1467 DRM_ERROR("cannot find crtc %d\n", crtc_id)__drm_err("cannot find crtc %d\n", crtc_id);
1468 return -ENOENT2;
1469 }
1470 radeon_crtc = to_radeon_crtc(crtc)({ const __typeof( ((struct radeon_crtc *)0)->base ) *__mptr
= (crtc); (struct radeon_crtc *)( (char *)__mptr - __builtin_offsetof
(struct radeon_crtc, base) );})
;
1471 crtc_id = radeon_crtc->crtc_id;
1472
1473 if (!crtc->enabled) {
1474 /* if the CRTC isn't enabled - we need to nop out the wait until */
1475 ib[h_idx + 2] = PACKET2(0)(0x80000000 | ((((0)) << 0) & (0x3fffffff << 0
)))
;
1476 ib[h_idx + 3] = PACKET2(0)(0x80000000 | ((((0)) << 0) & (0x3fffffff << 0
)))
;
1477 } else if (crtc_id == 1) {
1478 switch (reg) {
1479 case AVIVO_D1MODE_VLINE_START_END0x6538:
1480 header &= ~R300_CP_PACKET0_REG_MASK0x00001fff;
1481 header |= AVIVO_D2MODE_VLINE_START_END0x6d38 >> 2;
1482 break;
1483 case RADEON_CRTC_GUI_TRIG_VLINE0x0218:
1484 header &= ~R300_CP_PACKET0_REG_MASK0x00001fff;
1485 header |= RADEON_CRTC2_GUI_TRIG_VLINE0x0318 >> 2;
1486 break;
1487 default:
1488 DRM_ERROR("unknown crtc reloc\n")__drm_err("unknown crtc reloc\n");
1489 return -EINVAL22;
1490 }
1491 ib[h_idx] = header;
1492 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1(1 << 31);
1493 }
1494
1495 return 0;
1496}
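
[Editor's note] Reading the offsets used above back against the packet sequence described in the comment at lines 1415-1417, the dword layout appears to be (h_idx = p->idx - 2; an inference from the code, not a documented format):

	ib[h_idx + 0]  PACKET0 header for VLINE_START_END (register swapped for crtc 1)
	ib[h_idx + 1]  start/end value
	ib[h_idx + 2]  PACKET0 header for WAIT_UNTIL  (nop'ed out when the CRTC is off)
	ib[h_idx + 3]  wait value                     (gains the CRTC1 select bit)
	ib[h_idx + 5]  crtc_id carried in the reloc NOP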
1497
1498static int r100_get_vtx_size(uint32_t vtx_fmt)
1499{
1500 int vtx_size;
1501 vtx_size = 2;
1502 /* ordered according to bits in spec */
1503 if (vtx_fmt & RADEON_SE_VTX_FMT_W00x00000001)
1504 vtx_size++;
1505 if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR0x00000002)
1506 vtx_size += 3;
1507 if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA0x00000004)
1508 vtx_size++;
1509 if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR0x00000008)
1510 vtx_size++;
1511 if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC0x00000010)
1512 vtx_size += 3;
1513 if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG0x00000020)
1514 vtx_size++;
1515 if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC0x00000040)
1516 vtx_size++;
1517 if (vtx_fmt & RADEON_SE_VTX_FMT_ST00x00000080)
1518 vtx_size += 2;
1519 if (vtx_fmt & RADEON_SE_VTX_FMT_ST10x00000100)
1520 vtx_size += 2;
1521 if (vtx_fmt & RADEON_SE_VTX_FMT_Q10x00000200)
1522 vtx_size++;
1523 if (vtx_fmt & RADEON_SE_VTX_FMT_ST20x00000400)
1524 vtx_size += 2;
1525 if (vtx_fmt & RADEON_SE_VTX_FMT_Q20x00000800)
1526 vtx_size++;
1527 if (vtx_fmt & RADEON_SE_VTX_FMT_ST30x00001000)
1528 vtx_size += 2;
1529 if (vtx_fmt & RADEON_SE_VTX_FMT_Q30x00002000)
1530 vtx_size++;
1531 if (vtx_fmt & RADEON_SE_VTX_FMT_Q00x00004000)
1532 vtx_size++;
1533 /* blend weight */
1534 if (vtx_fmt & (0x7 << 15))
1535 vtx_size += (vtx_fmt >> 15) & 0x7;
1536 if (vtx_fmt & RADEON_SE_VTX_FMT_N00x00040000)
1537 vtx_size += 3;
1538 if (vtx_fmt & RADEON_SE_VTX_FMT_XY10x08000000)
1539 vtx_size += 2;
1540 if (vtx_fmt & RADEON_SE_VTX_FMT_Z10x10000000)
1541 vtx_size++;
1542 if (vtx_fmt & RADEON_SE_VTX_FMT_W10x20000000)
1543 vtx_size++;
1544 if (vtx_fmt & RADEON_SE_VTX_FMT_N10x40000000)
1545 vtx_size++;
1546 if (vtx_fmt & RADEON_SE_VTX_FMT_Z0x80000000)
1547 vtx_size++;
1548 return vtx_size;
1549}
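
[Editor's note] Worked example: for vtx_fmt == (RADEON_SE_VTX_FMT_FPCOLOR | RADEON_SE_VTX_FMT_ST0) == 0x00000082, the result is the base 2 dwords plus 3 (FPCOLOR) plus 2 (ST0), i.e. 7 dwords per vertex.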
1550
1551static int r100_packet0_check(struct radeon_cs_parser *p,
1552 struct radeon_cs_packet *pkt,
1553 unsigned idx, unsigned reg)
1554{
1555 struct radeon_bo_list *reloc;
1556 struct r100_cs_track *track;
1557 volatile uint32_t *ib;
1558 uint32_t tmp;
1559 int r;
1560 int i, face;
1561 u32 tile_flags = 0;
1562 u32 idx_value;
1563
1564 ib = p->ib.ptr;
1565 track = (struct r100_cs_track *)p->track;
1566
1567 idx_value = radeon_get_ib_value(p, idx);
1568
1569 switch (reg) {
1570 case RADEON_CRTC_GUI_TRIG_VLINE0x0218:
1571 r = r100_cs_packet_parse_vline(p);
1572 if (r) {
1573 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg)
1574 idx, reg)__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg);
1575 radeon_cs_dump_packet(p, pkt);
1576 return r;
1577 }
1578 break;
1579 /* FIXME: only allow PACKET3 blit? easier to check for out of
1580 * range access */
1581 case RADEON_DST_PITCH_OFFSET0x142c:
1582 case RADEON_SRC_PITCH_OFFSET0x1428:
1583 r = r100_reloc_pitch_offset(p, pkt, idx, reg);
1584 if (r)
1585 return r;
1586 break;
1587 case RADEON_RB3D_DEPTHOFFSET0x1c24:
1588 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1589 if (r) {
1590 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg)
1591 idx, reg)__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg);
1592 radeon_cs_dump_packet(p, pkt);
1593 return r;
1594 }
1595 track->zb.robj = reloc->robj;
1596 track->zb.offset = idx_value;
1597 track->zb_dirty = true1;
1598 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1599 break;
1600 case RADEON_RB3D_COLOROFFSET0x1c40:
1601 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1602 if (r) {
1603 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg)
1604 idx, reg)__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg);
1605 radeon_cs_dump_packet(p, pkt);
1606 return r;
1607 }
1608 track->cb[0].robj = reloc->robj;
1609 track->cb[0].offset = idx_value;
1610 track->cb_dirty = true1;
1611 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1612 break;
1613 case RADEON_PP_TXOFFSET_00x1c5c:
1614 case RADEON_PP_TXOFFSET_10x1c74:
1615 case RADEON_PP_TXOFFSET_20x1c8c:
1616 i = (reg - RADEON_PP_TXOFFSET_00x1c5c) / 24;
1617 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1618 if (r) {
1619 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg)
1620 idx, reg)__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg);
1621 radeon_cs_dump_packet(p, pkt);
1622 return r;
1623 }
1624 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS0x01)) {
1625 if (reloc->tiling_flags & RADEON_TILING_MACRO0x1)
1626 tile_flags |= RADEON_TXO_MACRO_TILE(1 << 2);
1627 if (reloc->tiling_flags & RADEON_TILING_MICRO0x2)
1628 tile_flags |= RADEON_TXO_MICRO_TILE_X2(1 << 3);
1629
1630 tmp = idx_value & ~(0x7 << 2);
1631 tmp |= tile_flags;
1632 ib[idx] = tmp + ((u32)reloc->gpu_offset);
1633 } else
1634 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1635 track->textures[i].robj = reloc->robj;
1636 track->tex_dirty = true1;
1637 break;
1638 case RADEON_PP_CUBIC_OFFSET_T0_00x1dd0:
1639 case RADEON_PP_CUBIC_OFFSET_T0_10x1dd4:
1640 case RADEON_PP_CUBIC_OFFSET_T0_20x1dd8:
1641 case RADEON_PP_CUBIC_OFFSET_T0_30x1ddc:
1642 case RADEON_PP_CUBIC_OFFSET_T0_40x1de0:
1643 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_00x1dd0) / 4;
1644 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1645 if (r) {
1646 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg)
1647 idx, reg)__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg);
1648 radeon_cs_dump_packet(p, pkt);
1649 return r;
1650 }
1651 track->textures[0].cube_info[i].offset = idx_value;
1652 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1653 track->textures[0].cube_info[i].robj = reloc->robj;
1654 track->tex_dirty = true1;
1655 break;
1656 case RADEON_PP_CUBIC_OFFSET_T1_00x1e00:
1657 case RADEON_PP_CUBIC_OFFSET_T1_10x1e04:
1658 case RADEON_PP_CUBIC_OFFSET_T1_20x1e08:
1659 case RADEON_PP_CUBIC_OFFSET_T1_30x1e0c:
1660 case RADEON_PP_CUBIC_OFFSET_T1_40x1e10:
1661 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_00x1e00) / 4;
1662 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1663 if (r) {
1664 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg)
1665 idx, reg)__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg);
1666 radeon_cs_dump_packet(p, pkt);
1667 return r;
1668 }
1669 track->textures[1].cube_info[i].offset = idx_value;
1670 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1671 track->textures[1].cube_info[i].robj = reloc->robj;
1672 track->tex_dirty = true1;
1673 break;
1674 case RADEON_PP_CUBIC_OFFSET_T2_00x1e14:
1675 case RADEON_PP_CUBIC_OFFSET_T2_10x1e18:
1676 case RADEON_PP_CUBIC_OFFSET_T2_20x1e1c:
1677 case RADEON_PP_CUBIC_OFFSET_T2_30x1e20:
1678 case RADEON_PP_CUBIC_OFFSET_T2_40x1e24:
1679 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_00x1e14) / 4;
1680 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1681 if (r) {
1682 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg)
1683 idx, reg)__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg);
1684 radeon_cs_dump_packet(p, pkt);
1685 return r;
1686 }
1687 track->textures[2].cube_info[i].offset = idx_value;
1688 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1689 track->textures[2].cube_info[i].robj = reloc->robj;
1690 track->tex_dirty = true1;
1691 break;
1692 case RADEON_RE_WIDTH_HEIGHT0x1c44:
1693 track->maxy = ((idx_value >> 16) & 0x7FF);
1694 track->cb_dirty = true1;
1695 track->zb_dirty = true1;
1696 break;
1697 case RADEON_RB3D_COLORPITCH0x1c48:
1698 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1699 if (r) {
1700 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg)
1701 idx, reg)__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg);
1702 radeon_cs_dump_packet(p, pkt);
1703 return r;
1704 }
1705 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS0x01)) {
1706 if (reloc->tiling_flags & RADEON_TILING_MACRO0x1)
1707 tile_flags |= RADEON_COLOR_TILE_ENABLE(1 << 16);
1708 if (reloc->tiling_flags & RADEON_TILING_MICRO0x2)
1709 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE(1 << 17);
1710
1711 tmp = idx_value & ~(0x7 << 16);
1712 tmp |= tile_flags;
1713 ib[idx] = tmp;
1714 } else
1715 ib[idx] = idx_value;
1716
1717 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK0x000001ff8;
1718 track->cb_dirty = true1;
1719 break;
1720 case RADEON_RB3D_DEPTHPITCH0x1c28:
1721 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK0x00001ff8;
1722 track->zb_dirty = true1;
1723 break;
1724 case RADEON_RB3D_CNTL0x1c3c:
1725 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT10) & 0x1f) {
1726 case 7:
1727 case 8:
1728 case 9:
1729 case 11:
1730 case 12:
1731 track->cb[0].cpp = 1;
1732 break;
1733 case 3:
1734 case 4:
1735 case 15:
1736 track->cb[0].cpp = 2;
1737 break;
1738 case 6:
1739 track->cb[0].cpp = 4;
1740 break;
1741 default:
1742 DRM_ERROR("Invalid color buffer format (%d) !\n",__drm_err("Invalid color buffer format (%d) !\n", ((idx_value
>> 10) & 0x1f))
1743 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f))__drm_err("Invalid color buffer format (%d) !\n", ((idx_value
>> 10) & 0x1f))
;
1744 return -EINVAL22;
1745 }
1746 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE(1 << 8));
1747 track->cb_dirty = true1;
1748 track->zb_dirty = true1;
1749 break;
1750 case RADEON_RB3D_ZSTENCILCNTL0x1c2c:
1751 switch (idx_value & 0xf) {
1752 case 0:
1753 track->zb.cpp = 2;
1754 break;
1755 case 2:
1756 case 3:
1757 case 4:
1758 case 5:
1759 case 9:
1760 case 11:
1761 track->zb.cpp = 4;
1762 break;
1763 default:
1764 break;
1765 }
1766 track->zb_dirty = true1;
1767 break;
1768 case RADEON_RB3D_ZPASS_ADDR0x3294:
1769 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1770 if (r) {
1771 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg)
1772 idx, reg)__drm_err("No reloc for ib[%d]=0x%04X\n", idx, reg);
1773 radeon_cs_dump_packet(p, pkt);
1774 return r;
1775 }
1776 ib[idx] = idx_value + ((u32)reloc->gpu_offset);
1777 break;
1778 case RADEON_PP_CNTL0x1c38:
1779 {
1780 uint32_t temp = idx_value >> 4;
1781 for (i = 0; i < track->num_texture; i++)
1782 track->textures[i].enabled = !!(temp & (1 << i));
1783 track->tex_dirty = true1;
1784 }
1785 break;
1786 case RADEON_SE_VF_CNTL0x2084:
1787 track->vap_vf_cntl = idx_value;
1788 break;
1789 case RADEON_SE_VTX_FMT0x2080:
1790 track->vtx_size = r100_get_vtx_size(idx_value);
1791 break;
1792 case RADEON_PP_TEX_SIZE_00x1d04:
1793 case RADEON_PP_TEX_SIZE_10x1d0c:
1794 case RADEON_PP_TEX_SIZE_20x1d14:
1795 i = (reg - RADEON_PP_TEX_SIZE_00x1d04) / 8;
1796 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK(0x7ff << 0)) + 1;
1797 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK(0x7ff << 16)) >> RADEON_TEX_VSIZE_SHIFT16) + 1;
1798 track->tex_dirty = true1;
1799 break;
1800 case RADEON_PP_TEX_PITCH_00x1d08:
1801 case RADEON_PP_TEX_PITCH_10x1d10:
1802 case RADEON_PP_TEX_PITCH_20x1d18:
1803 i = (reg - RADEON_PP_TEX_PITCH_00x1d08) / 8;
1804 track->textures[i].pitch = idx_value + 32;
1805 track->tex_dirty = true1;
1806 break;
1807 case RADEON_PP_TXFILTER_00x1c54:
1808 case RADEON_PP_TXFILTER_10x1c6c:
1809 case RADEON_PP_TXFILTER_20x1c84:
1810 i = (reg - RADEON_PP_TXFILTER_00x1c54) / 24;
1811 track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK(0x0f << 16))
1812 >> RADEON_MAX_MIP_LEVEL_SHIFT16);
1813 tmp = (idx_value >> 23) & 0x7;
1814 if (tmp == 2 || tmp == 6)
1815 track->textures[i].roundup_w = false0;
1816 tmp = (idx_value >> 27) & 0x7;
1817 if (tmp == 2 || tmp == 6)
1818 track->textures[i].roundup_h = false0;
1819 track->tex_dirty = true1;
1820 break;
1821 case RADEON_PP_TXFORMAT_00x1c58:
1822 case RADEON_PP_TXFORMAT_10x1c70:
1823 case RADEON_PP_TXFORMAT_20x1c88:
1824 i = (reg - RADEON_PP_TXFORMAT_00x1c58) / 24;
1825 if (idx_value & RADEON_TXFORMAT_NON_POWER2(1 << 7)) {
1826 track->textures[i].use_pitch = true1;
1827 } else {
1828 track->textures[i].use_pitch = false0;
1829 track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK(15 << 8)) >> RADEON_TXFORMAT_WIDTH_SHIFT8);
1830 track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK(15 << 12)) >> RADEON_TXFORMAT_HEIGHT_SHIFT12);
1831 }
1832 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE(1 << 30))
1833 track->textures[i].tex_coord_type = 2;
1834 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK(31 << 0))) {
1835 case RADEON_TXFORMAT_I8(0 << 0):
1836 case RADEON_TXFORMAT_RGB332(2 << 0):
1837 case RADEON_TXFORMAT_Y8(8 << 0):
1838 track->textures[i].cpp = 1;
1839 track->textures[i].compress_format = R100_TRACK_COMP_NONE0;
1840 break;
1841 case RADEON_TXFORMAT_AI88(1 << 0):
1842 case RADEON_TXFORMAT_ARGB1555(3 << 0):
1843 case RADEON_TXFORMAT_RGB565(4 << 0):
1844 case RADEON_TXFORMAT_ARGB4444(5 << 0):
1845 case RADEON_TXFORMAT_VYUY422(10 << 0):
1846 case RADEON_TXFORMAT_YVYU422(11 << 0):
1847 case RADEON_TXFORMAT_SHADOW16(16 << 0):
1848 case RADEON_TXFORMAT_LDUDV655(19 << 0):
1849 case RADEON_TXFORMAT_DUDV88(18 << 0):
1850 track->textures[i].cpp = 2;
1851 track->textures[i].compress_format = R100_TRACK_COMP_NONE0;
1852 break;
1853 case RADEON_TXFORMAT_ARGB8888(6 << 0):
1854 case RADEON_TXFORMAT_RGBA8888(7 << 0):
1855 case RADEON_TXFORMAT_SHADOW32(17 << 0):
1856 case RADEON_TXFORMAT_LDUDUV8888(20 << 0):
1857 track->textures[i].cpp = 4;
1858 track->textures[i].compress_format = R100_TRACK_COMP_NONE0;
1859 break;
1860 case RADEON_TXFORMAT_DXT1(12 << 0):
1861 track->textures[i].cpp = 1;
1862 track->textures[i].compress_format = R100_TRACK_COMP_DXT11;
1863 break;
1864 case RADEON_TXFORMAT_DXT23(14 << 0):
1865 case RADEON_TXFORMAT_DXT45(15 << 0):
1866 track->textures[i].cpp = 1;
1867 track->textures[i].compress_format = R100_TRACK_COMP_DXT352;
1868 break;
1869 }
1870 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1871 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1872 track->tex_dirty = true1;
1873 break;
1874 case RADEON_PP_CUBIC_FACES_00x1d24:
1875 case RADEON_PP_CUBIC_FACES_10x1d28:
1876 case RADEON_PP_CUBIC_FACES_20x1d2c:
1877 tmp = idx_value;
1878 i = (reg - RADEON_PP_CUBIC_FACES_00x1d24) / 4;
1879 for (face = 0; face < 4; face++) {
1880 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
1881 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
1882 }
1883 track->tex_dirty = true1;
1884 break;
1885 default:
1886 pr_err("Forbidden register 0x%04X in cs at %d\n", reg, idx)printk("\0013" "Forbidden register 0x%04X in cs at %d\n", reg
, idx)
;
1887 return -EINVAL22;
1888 }
1889 return 0;
1890}
1891
1892int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1893 struct radeon_cs_packet *pkt,
1894 struct radeon_bo *robj)
1895{
1896 unsigned idx;
1897 u32 value;
1898 idx = pkt->idx + 1;
1899 value = radeon_get_ib_value(p, idx + 2);
1900 if ((value + 1) > radeon_bo_size(robj)) {
1901 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "__drm_err("[drm] Buffer too small for PACKET3 INDX_BUFFER " "(need %u have %lu) !\n"
, value + 1, radeon_bo_size(robj))
1902 "(need %u have %lu) !\n",__drm_err("[drm] Buffer too small for PACKET3 INDX_BUFFER " "(need %u have %lu) !\n"
, value + 1, radeon_bo_size(robj))
1903 value + 1,__drm_err("[drm] Buffer too small for PACKET3 INDX_BUFFER " "(need %u have %lu) !\n"
, value + 1, radeon_bo_size(robj))
1904 radeon_bo_size(robj))__drm_err("[drm] Buffer too small for PACKET3 INDX_BUFFER " "(need %u have %lu) !\n"
, value + 1, radeon_bo_size(robj))
;
1905 return -EINVAL22;
1906 }
1907 return 0;
1908}
1909
1910static int r100_packet3_check(struct radeon_cs_parser *p,
1911 struct radeon_cs_packet *pkt)
1912{
1913 struct radeon_bo_list *reloc;
1914 struct r100_cs_track *track;
1915 unsigned idx;
1916 volatile uint32_t *ib;
1917 int r;
1918
1919 ib = p->ib.ptr;
1920 idx = pkt->idx + 1;
1921 track = (struct r100_cs_track *)p->track;
1922 switch (pkt->opcode) {
1923 case PACKET3_3D_LOAD_VBPNTR0x2F:
1924 r = r100_packet3_load_vbpntr(p, pkt, idx);
1925 if (r)
1926 return r;
1927 break;
1928 case PACKET3_INDX_BUFFER0x33:
1929 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1930 if (r) {
1931 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode)__drm_err("No reloc for packet3 %d\n", pkt->opcode);
1932 radeon_cs_dump_packet(p, pkt);
1933 return r;
1934 }
1935 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
1936 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1937 if (r) {
1938 return r;
1939 }
1940 break;
1941 case 0x23:
1942 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
1943 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1944 if (r) {
1945 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode)__drm_err("No reloc for packet3 %d\n", pkt->opcode);
1946 radeon_cs_dump_packet(p, pkt);
1947 return r;
1948 }
1949 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
1950 track->num_arrays = 1;
1951 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
1952
1953 track->arrays[0].robj = reloc->robj;
1954 track->arrays[0].esize = track->vtx_size;
1955
1956 track->max_indx = radeon_get_ib_value(p, idx+1);
1957
1958 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
1959 track->immd_dwords = pkt->count - 1;
1960 r = r100_cs_track_check(p->rdev, track);
1961 if (r)
1962 return r;
1963 break;
1964 case PACKET3_3D_DRAW_IMMD0x29:
1965 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1966 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n")__drm_err("PRIM_WALK must be 3 for IMMD draw\n");
1967 return -EINVAL22;
1968 }
1969 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1970 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1971 track->immd_dwords = pkt->count - 1;
1972 r = r100_cs_track_check(p->rdev, track);
1973 if (r)
1974 return r;
1975 break;
1976 /* triggers drawing using in-packet vertex data */
1977 case PACKET3_3D_DRAW_IMMD_20x35:
1978 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1979 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n")__drm_err("PRIM_WALK must be 3 for IMMD draw\n");
1980 return -EINVAL22;
1981 }
1982 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1983 track->immd_dwords = pkt->count;
1984 r = r100_cs_track_check(p->rdev, track);
1985 if (r)
1986 return r;
1987 break;
1988 /* triggers drawing using in-packet vertex data */
1989 case PACKET3_3D_DRAW_VBUF_20x34:
1990 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1991 r = r100_cs_track_check(p->rdev, track);
1992 if (r)
1993 return r;
1994 break;
1995 /* triggers drawing of vertex buffers setup elsewhere */
1996 case PACKET3_3D_DRAW_INDX_20x36:
1997 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1998 r = r100_cs_track_check(p->rdev, track);
1999 if (r)
2000 return r;
2001 break;
2002 /* triggers drawing using indices to vertex buffer */
2003 case PACKET3_3D_DRAW_VBUF0x28:
2004 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
2005 r = r100_cs_track_check(p->rdev, track);
2006 if (r)
2007 return r;
2008 break;
2009 /* triggers drawing of vertex buffers setup elsewhere */
2010 case PACKET3_3D_DRAW_INDX0x2A:
2011 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
2012 r = r100_cs_track_check(p->rdev, track);
2013 if (r)
2014 return r;
2015 break;
2016 /* triggers drawing using indices to vertex buffer */
2017 case PACKET3_3D_CLEAR_HIZ0x37:
2018 case PACKET3_3D_CLEAR_ZMASK0x32:
2019 if (p->rdev->hyperz_filp != p->filp)
2020 return -EINVAL22;
2021 break;
2022 case PACKET3_NOP0x10:
2023 break;
2024 default:
2025 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode)__drm_err("Packet3 opcode %x not supported\n", pkt->opcode
)
;
2026 return -EINVAL22;
2027 }
2028 return 0;
2029}
2030
2031int r100_cs_parse(struct radeon_cs_parser *p)
2032{
2033 struct radeon_cs_packet pkt;
2034 struct r100_cs_track *track;
2035 int r;
2036
2037 track = kzalloc(sizeof(*track), GFP_KERNEL(0x0001 | 0x0004));
2038 if (!track)
2039 return -ENOMEM12;
2040 r100_cs_track_clear(p->rdev, track);
2041 p->track = track;
2042 do {
2043 r = radeon_cs_packet_parse(p, &pkt, p->idx);
2044 if (r) {
2045 return r;
2046 }
2047 p->idx += pkt.count + 2;
2048 switch (pkt.type) {
2049 case RADEON_PACKET_TYPE00:
2050 if (p->rdev->family >= CHIP_R200)
2051 r = r100_cs_parse_packet0(p, &pkt,
2052 p->rdev->config.r100.reg_safe_bm,
2053 p->rdev->config.r100.reg_safe_bm_size,
2054 &r200_packet0_check);
2055 else
2056 r = r100_cs_parse_packet0(p, &pkt,
2057 p->rdev->config.r100.reg_safe_bm,
2058 p->rdev->config.r100.reg_safe_bm_size,
2059 &r100_packet0_check);
2060 break;
2061 case RADEON_PACKET_TYPE22:
2062 break;
2063 case RADEON_PACKET_TYPE33:
2064 r = r100_packet3_check(p, &pkt);
2065 break;
2066 default:
2067 DRM_ERROR("Unknown packet type %d !\n",__drm_err("Unknown packet type %d !\n", pkt.type)
2068 pkt.type)__drm_err("Unknown packet type %d !\n", pkt.type);
2069 return -EINVAL22;
2070 }
2071 if (r)
2072 return r;
2073 } while (p->idx < p->chunk_ib->length_dw);
2074 return 0;
2075}
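
[Editor's note] On the parse loop: p->idx += pkt.count + 2 skips one header dword plus count + 1 payload dwords, so a PACKET0 with count == 0 still occupies two dwords (header plus one register write), and one writing three consecutive registers has count == 2 and spans four dwords.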
2076
2077static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2078{
2079 DRM_ERROR("pitch %d\n", t->pitch)__drm_err("pitch %d\n", t->pitch);
2080 DRM_ERROR("use_pitch %d\n", t->use_pitch)__drm_err("use_pitch %d\n", t->use_pitch);
2081 DRM_ERROR("width %d\n", t->width)__drm_err("width %d\n", t->width);
2082 DRM_ERROR("width_11 %d\n", t->width_11)__drm_err("width_11 %d\n", t->width_11);
2083 DRM_ERROR("height %d\n", t->height)__drm_err("height %d\n", t->height);
2084 DRM_ERROR("height_11 %d\n", t->height_11)__drm_err("height_11 %d\n", t->height_11);
2085 DRM_ERROR("num levels %d\n", t->num_levels)__drm_err("num levels %d\n", t->num_levels
)
;
2086 DRM_ERROR("depth %d\n", t->txdepth)__drm_err("depth %d\n", t->txdepth);
2087 DRM_ERROR("bpp %d\n", t->cpp)__drm_err("bpp %d\n", t->cpp);
2088 DRM_ERROR("coordinate type %d\n", t->tex_coord_type)__drm_err("coordinate type %d\n", t->tex_coord_type
)
;
2089 DRM_ERROR("width round to power of 2 %d\n", t->roundup_w)__drm_err("width round to power of 2 %d\n", t->roundup_w);
2090 DRM_ERROR("height round to power of 2 %d\n", t->roundup_h)__drm_err("height round to power of 2 %d\n", t->roundup_h);
2091 DRM_ERROR("compress format %d\n", t->compress_format)__drm_err("compress format %d\n", t->compress_format
)
;
2092}
2093
2094static int r100_track_compress_size(int compress_format, int w, int h)
2095{
2096 int block_width, block_height, block_bytes;
2097 int wblocks, hblocks;
2098 int min_wblocks;
2099 int sz;
2100
2101 block_width = 4;
2102 block_height = 4;
2103
2104 switch (compress_format) {
2105 case R100_TRACK_COMP_DXT11:
2106 block_bytes = 8;
2107 min_wblocks = 4;
2108 break;
2109 default:
2110 case R100_TRACK_COMP_DXT352:
2111 block_bytes = 16;
2112 min_wblocks = 2;
2113 break;
2114 }
2115
2116 hblocks = (h + block_height - 1) / block_height;
2117 wblocks = (w + block_width - 1) / block_width;
2118 if (wblocks < min_wblocks)
2119 wblocks = min_wblocks;
2120 sz = wblocks * hblocks * block_bytes;
2121 return sz;
2122}
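
[Editor's note] Worked examples for the block math above (4x4 texel blocks):

	DXT1,   16x16: wblocks = hblocks = 4,  size = 4 * 4 * 8  = 128 bytes
	DXT1,    4x4:  wblocks clamped 1 -> 4, size = 4 * 1 * 8  =  32 bytes
	DXT3/5,  4x4:  wblocks clamped 1 -> 2, size = 2 * 1 * 16 =  32 bytes

The min_wblocks clamp presumably models the hardware's minimum row pitch for compressed tiles (an inference from the code, not a documented constraint).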
2123
2124static int r100_cs_track_cube(struct radeon_device *rdev,
2125 struct r100_cs_track *track, unsigned idx)
2126{
2127 unsigned face, w, h;
2128 struct radeon_bo *cube_robj;
2129 unsigned long size;
2130 unsigned compress_format = track->textures[idx].compress_format;
2131
2132 for (face = 0; face < 5; face++) {
2133 cube_robj = track->textures[idx].cube_info[face].robj;
2134 w = track->textures[idx].cube_info[face].width;
2135 h = track->textures[idx].cube_info[face].height;
2136
2137 if (compress_format) {
2138 size = r100_track_compress_size(compress_format, w, h);
2139 } else
2140 size = w * h;
2141 size *= track->textures[idx].cpp;
2142
2143 size += track->textures[idx].cube_info[face].offset;
2144
2145 if (size > radeon_bo_size(cube_robj)) {
2146 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",__drm_err("Cube texture offset greater than object size %lu %lu\n"
, size, radeon_bo_size(cube_robj))
2147 size, radeon_bo_size(cube_robj))__drm_err("Cube texture offset greater than object size %lu %lu\n"
, size, radeon_bo_size(cube_robj))
;
2148 r100_cs_track_texture_print(&track->textures[idx]);
2149 return -1;
2150 }
2151 }
2152 return 0;
2153}
2154
2155static int r100_cs_track_texture_check(struct radeon_device *rdev,
2156 struct r100_cs_track *track)
2157{
2158 struct radeon_bo *robj;
2159 unsigned long size;
2160 unsigned u, i, w, h, d;
2161 int ret;
2162
2163 for (u = 0; u < track->num_texture; u++) {
2164 if (!track->textures[u].enabled)
2165 continue;
2166 if (track->textures[u].lookup_disable)
2167 continue;
2168 robj = track->textures[u].robj;
2169 if (robj == NULL((void *)0)) {
2170 DRM_ERROR("No texture bound to unit %u\n", u)__drm_err("No texture bound to unit %u\n", u);
2171 return -EINVAL22;
2172 }
2173 size = 0;
2174 for (i = 0; i <= track->textures[u].num_levels; i++) {
2175 if (track->textures[u].use_pitch) {
2176 if (rdev->family < CHIP_R300)
2177 w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2178 else
2179 w = track->textures[u].pitch / (1 << i);
2180 } else {
2181 w = track->textures[u].width;
2182 if (rdev->family >= CHIP_RV515)
2183 w |= track->textures[u].width_11;
2184 w = w / (1 << i);
2185 if (track->textures[u].roundup_w)
2186 w = roundup_pow_of_two(w);
2187 }
2188 h = track->textures[u].height;
2189 if (rdev->family >= CHIP_RV515)
2190 h |= track->textures[u].height_11;
2191 h = h / (1 << i);
2192 if (track->textures[u].roundup_h)
2193 h = roundup_pow_of_two(h);
2194 if (track->textures[u].tex_coord_type == 1) {
2195 d = (1 << track->textures[u].txdepth) / (1 << i);
2196 if (!d)
2197 d = 1;
2198 } else {
2199 d = 1;
2200 }
2201 if (track->textures[u].compress_format) {
2202
2203 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
2204 /* compressed textures are block based */
2205 } else
2206 size += w * h * d;
2207 }
2208 size *= track->textures[u].cpp;
2209
2210 switch (track->textures[u].tex_coord_type) {
2211 case 0:
2212 case 1:
2213 break;
2214 case 2:
2215 if (track->separate_cube) {
2216 ret = r100_cs_track_cube(rdev, track, u);
2217 if (ret)
2218 return ret;
2219 } else
2220 size *= 6;
2221 break;
2222 default:
2223 DRM_ERROR("Invalid texture coordinate type %u for unit "__drm_err("Invalid texture coordinate type %u for unit " "%u\n"
, track->textures[u].tex_coord_type, u)
2224 "%u\n", track->textures[u].tex_coord_type, u)__drm_err("Invalid texture coordinate type %u for unit " "%u\n"
, track->textures[u].tex_coord_type, u)
;
2225 return -EINVAL22;
2226 }
2227 if (size > radeon_bo_size(robj)) {
2228 DRM_ERROR("Texture of unit %u needs %lu bytes but is "__drm_err("Texture of unit %u needs %lu bytes but is " "%lu\n"
, u, size, radeon_bo_size(robj))
2229 "%lu\n", u, size, radeon_bo_size(robj))__drm_err("Texture of unit %u needs %lu bytes but is " "%lu\n"
, u, size, radeon_bo_size(robj))
;
2230 r100_cs_track_texture_print(&track->textures[u]);
2231 return -EINVAL22;
2232 }
2233 }
2234 return 0;
2235}
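
[Editor's note] Worked example for the level loop above: a 256x256 2D texture (tex_coord_type 0, so d == 1) with num_levels == 2 and cpp == 4 sums levels i = 0..2: (256*256 + 128*128 + 64*64) * 4 = 344064 bytes, which the backing bo must hold. Note the bound i <= num_levels, so num_levels counts mip levels beyond the base.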
2236
2237int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2238{
2239 unsigned i;
2240 unsigned long size;
2241 unsigned prim_walk;
2242 unsigned nverts;
2243 unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
2244
2245 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
2246 !track->blend_read_enable)
2247 num_cb = 0;
2248
2249 for (i = 0; i < num_cb; i++) {
2250 if (track->cb[i].robj == NULL((void *)0)) {
2251 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i)__drm_err("[drm] No buffer for color buffer %d !\n", i);
2252 return -EINVAL22;
2253 }
2254 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2255 size += track->cb[i].offset;
2256 if (size > radeon_bo_size(track->cb[i].robj)) {
2257 DRM_ERROR("[drm] Buffer too small for color buffer %d "__drm_err("[drm] Buffer too small for color buffer %d " "(need %lu have %lu) !\n"
, i, size, radeon_bo_size(track->cb[i].robj))
2258 "(need %lu have %lu) !\n", i, size,__drm_err("[drm] Buffer too small for color buffer %d " "(need %lu have %lu) !\n"
, i, size, radeon_bo_size(track->cb[i].robj))
2259 radeon_bo_size(track->cb[i].robj))__drm_err("[drm] Buffer too small for color buffer %d " "(need %lu have %lu) !\n"
, i, size, radeon_bo_size(track->cb[i].robj))
;
2260 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",__drm_err("[drm] color buffer %d (%u %u %u %u)\n", i, track->
cb[i].pitch, track->cb[i].cpp, track->cb[i].offset, track
->maxy)
2261 i, track->cb[i].pitch, track->cb[i].cpp,__drm_err("[drm] color buffer %d (%u %u %u %u)\n", i, track->
cb[i].pitch, track->cb[i].cpp, track->cb[i].offset, track
->maxy)
2262 track->cb[i].offset, track->maxy)__drm_err("[drm] color buffer %d (%u %u %u %u)\n", i, track->
cb[i].pitch, track->cb[i].cpp, track->cb[i].offset, track
->maxy)
;
2263 return -EINVAL22;
2264 }
2265 }
2266 track->cb_dirty = false0;
2267
2268 if (track->zb_dirty && track->z_enabled) {
2269 if (track->zb.robj == NULL((void *)0)) {
2270 DRM_ERROR("[drm] No buffer for z buffer !\n")__drm_err("[drm] No buffer for z buffer !\n");
2271 return -EINVAL22;
2272 }
2273 size = track->zb.pitch * track->zb.cpp * track->maxy;
2274 size += track->zb.offset;
2275 if (size > radeon_bo_size(track->zb.robj)) {
2276 DRM_ERROR("[drm] Buffer too small for z buffer "__drm_err("[drm] Buffer too small for z buffer " "(need %lu have %lu) !\n"
, size, radeon_bo_size(track->zb.robj))
2277 "(need %lu have %lu) !\n", size,__drm_err("[drm] Buffer too small for z buffer " "(need %lu have %lu) !\n"
, size, radeon_bo_size(track->zb.robj))
2278 radeon_bo_size(track->zb.robj))__drm_err("[drm] Buffer too small for z buffer " "(need %lu have %lu) !\n"
, size, radeon_bo_size(track->zb.robj))
;
2279 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",__drm_err("[drm] zbuffer (%u %u %u %u)\n", track->zb.pitch
, track->zb.cpp, track->zb.offset, track->maxy)
2280 track->zb.pitch, track->zb.cpp,__drm_err("[drm] zbuffer (%u %u %u %u)\n", track->zb.pitch
, track->zb.cpp, track->zb.offset, track->maxy)
2281 track->zb.offset, track->maxy)__drm_err("[drm] zbuffer (%u %u %u %u)\n", track->zb.pitch
, track->zb.cpp, track->zb.offset, track->maxy)
;
2282 return -EINVAL22;
2283 }
2284 }
2285 track->zb_dirty = false0;
2286
2287 if (track->aa_dirty && track->aaresolve) {
2288 if (track->aa.robj == NULL((void *)0)) {
2289 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i)__drm_err("[drm] No buffer for AA resolve buffer %d !\n", i);
2290 return -EINVAL22;
2291 }
2292 /* I believe the format comes from colorbuffer0. */
2293 size = track->aa.pitch * track->cb[0].cpp * track->maxy;
2294 size += track->aa.offset;
2295 if (size > radeon_bo_size(track->aa.robj)) {
2296 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "__drm_err("[drm] Buffer too small for AA resolve buffer %d " "(need %lu have %lu) !\n"
, i, size, radeon_bo_size(track->aa.robj))
2297 "(need %lu have %lu) !\n", i, size,__drm_err("[drm] Buffer too small for AA resolve buffer %d " "(need %lu have %lu) !\n"
, i, size, radeon_bo_size(track->aa.robj))
2298 radeon_bo_size(track->aa.robj))__drm_err("[drm] Buffer too small for AA resolve buffer %d " "(need %lu have %lu) !\n"
, i, size, radeon_bo_size(track->aa.robj))
;
2299 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",__drm_err("[drm] AA resolve buffer %d (%u %u %u %u)\n", i, track
->aa.pitch, track->cb[0].cpp, track->aa.offset, track
->maxy)
2300 i, track->aa.pitch, track->cb[0].cpp,__drm_err("[drm] AA resolve buffer %d (%u %u %u %u)\n", i, track
->aa.pitch, track->cb[0].cpp, track->aa.offset, track
->maxy)
2301 track->aa.offset, track->maxy)__drm_err("[drm] AA resolve buffer %d (%u %u %u %u)\n", i, track
->aa.pitch, track->cb[0].cpp, track->aa.offset, track
->maxy)
;
2302 return -EINVAL22;
2303 }
2304 }
2305 track->aa_dirty = false0;
2306
2307 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2308 if (track->vap_vf_cntl & (1 << 14)) {
2309 nverts = track->vap_alt_nverts;
2310 } else {
2311 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2312 }
2313 switch (prim_walk) {
2314 case 1:
2315 for (i = 0; i < track->num_arrays; i++) {
2316 size = track->arrays[i].esize * track->max_indx * 4;
2317 if (track->arrays[i].robj == NULL((void *)0)) {
2318 DRM_ERROR("(PW %u) Vertex array %u no buffer "__drm_err("(PW %u) Vertex array %u no buffer " "bound\n", prim_walk
, i)
2319 "bound\n", prim_walk, i)__drm_err("(PW %u) Vertex array %u no buffer " "bound\n", prim_walk
, i)
;
2320 return -EINVAL22;
2321 }
2322 if (size > radeon_bo_size(track->arrays[i].robj)) {
2323 dev_err(rdev->dev, "(PW %u) Vertex array %u "printf("drm:pid%d:%s *ERROR* " "(PW %u) Vertex array %u " "need %lu dwords have %lu dwords\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , prim_walk
, i, size >> 2, radeon_bo_size(track->arrays[i].robj
) >> 2)
2324 "need %lu dwords have %lu dwords\n",printf("drm:pid%d:%s *ERROR* " "(PW %u) Vertex array %u " "need %lu dwords have %lu dwords\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , prim_walk
, i, size >> 2, radeon_bo_size(track->arrays[i].robj
) >> 2)
2325 prim_walk, i, size >> 2,printf("drm:pid%d:%s *ERROR* " "(PW %u) Vertex array %u " "need %lu dwords have %lu dwords\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , prim_walk
, i, size >> 2, radeon_bo_size(track->arrays[i].robj
) >> 2)
2326 radeon_bo_size(track->arrays[i].robj)printf("drm:pid%d:%s *ERROR* " "(PW %u) Vertex array %u " "need %lu dwords have %lu dwords\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , prim_walk
, i, size >> 2, radeon_bo_size(track->arrays[i].robj
) >> 2)
2327 >> 2)printf("drm:pid%d:%s *ERROR* " "(PW %u) Vertex array %u " "need %lu dwords have %lu dwords\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , prim_walk
, i, size >> 2, radeon_bo_size(track->arrays[i].robj
) >> 2)
;
2328 DRM_ERROR("Max indices %u\n", track->max_indx)__drm_err("Max indices %u\n", track->max_indx);
2329 return -EINVAL22;
2330 }
2331 }
2332 break;
2333 case 2:
2334 for (i = 0; i < track->num_arrays; i++) {
2335 size = track->arrays[i].esize * (nverts - 1) * 4;
2336 if (track->arrays[i].robj == NULL((void *)0)) {
2337 DRM_ERROR("(PW %u) Vertex array %u no buffer "__drm_err("(PW %u) Vertex array %u no buffer " "bound\n", prim_walk
, i)
2338 "bound\n", prim_walk, i)__drm_err("(PW %u) Vertex array %u no buffer " "bound\n", prim_walk
, i)
;
2339 return -EINVAL22;
2340 }
2341 if (size > radeon_bo_size(track->arrays[i].robj)) {
2342 dev_err(rdev->dev, "(PW %u) Vertex array %u "printf("drm:pid%d:%s *ERROR* " "(PW %u) Vertex array %u " "need %lu dwords have %lu dwords\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , prim_walk
, i, size >> 2, radeon_bo_size(track->arrays[i].robj
) >> 2)
2343 "need %lu dwords have %lu dwords\n",printf("drm:pid%d:%s *ERROR* " "(PW %u) Vertex array %u " "need %lu dwords have %lu dwords\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , prim_walk
, i, size >> 2, radeon_bo_size(track->arrays[i].robj
) >> 2)
2344 prim_walk, i, size >> 2,printf("drm:pid%d:%s *ERROR* " "(PW %u) Vertex array %u " "need %lu dwords have %lu dwords\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , prim_walk
, i, size >> 2, radeon_bo_size(track->arrays[i].robj
) >> 2)
2345 radeon_bo_size(track->arrays[i].robj)printf("drm:pid%d:%s *ERROR* " "(PW %u) Vertex array %u " "need %lu dwords have %lu dwords\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , prim_walk
, i, size >> 2, radeon_bo_size(track->arrays[i].robj
) >> 2)
2346 >> 2)printf("drm:pid%d:%s *ERROR* " "(PW %u) Vertex array %u " "need %lu dwords have %lu dwords\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , prim_walk
, i, size >> 2, radeon_bo_size(track->arrays[i].robj
) >> 2)
;
2347 return -EINVAL22;
2348 }
2349 }
2350 break;
2351 case 3:
2352 size = track->vtx_size * nverts;
2353 if (size != track->immd_dwords) {
2354 DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",__drm_err("IMMD draw %u dwors but needs %lu dwords\n", track->
immd_dwords, size)
2355 track->immd_dwords, size)__drm_err("IMMD draw %u dwors but needs %lu dwords\n", track->
immd_dwords, size)
;
2356 DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",__drm_err("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", nverts
, track->vtx_size)
2357 nverts, track->vtx_size)__drm_err("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", nverts
, track->vtx_size)
;
2358 return -EINVAL22;
2359 }
2360 break;
2361 default:
2362 DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",__drm_err("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n"
, prim_walk)
2363 prim_walk)__drm_err("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n"
, prim_walk)
;
2364 return -EINVAL22;
2365 }
2366
2367 if (track->tex_dirty) {
2368 track->tex_dirty = false0;
2369 return r100_cs_track_texture_check(rdev, track);
2370 }
2371 return 0;
2372}
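
[Editor's note] Worked example for the prim_walk == 3 (immediate mode) branch: with vtx_size == 7 dwords and nverts == 4, the packet body must carry exactly 7 * 4 == 28 dwords; any mismatch against immd_dwords is rejected with -EINVAL.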
2373
2374void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2375{
2376 unsigned i, face;
2377
2378 track->cb_dirty = true1;
2379 track->zb_dirty = true1;
2380 track->tex_dirty = true1;
2381 track->aa_dirty = true1;
2382
2383 if (rdev->family < CHIP_R300) {
2384 track->num_cb = 1;
2385 if (rdev->family <= CHIP_RS200)
2386 track->num_texture = 3;
2387 else
2388 track->num_texture = 6;
2389 track->maxy = 2048;
2390 track->separate_cube = true1;
2391 } else {
2392 track->num_cb = 4;
2393 track->num_texture = 16;
2394 track->maxy = 4096;
2395 track->separate_cube = false0;
2396 track->aaresolve = false0;
2397 track->aa.robj = NULL((void *)0);
2398 }
2399
2400 for (i = 0; i < track->num_cb; i++) {
2401 track->cb[i].robj = NULL((void *)0);
2402 track->cb[i].pitch = 8192;
2403 track->cb[i].cpp = 16;
2404 track->cb[i].offset = 0;
2405 }
2406 track->z_enabled = true1;
2407 track->zb.robj = NULL((void *)0);
2408 track->zb.pitch = 8192;
2409 track->zb.cpp = 4;
2410 track->zb.offset = 0;
2411 track->vtx_size = 0x7F;
2412 track->immd_dwords = 0xFFFFFFFFUL;
2413 track->num_arrays = 11;
2414 track->max_indx = 0x00FFFFFFUL;
2415 for (i = 0; i < track->num_arrays; i++) {
2416 track->arrays[i].robj = NULL((void *)0);
2417 track->arrays[i].esize = 0x7F;
2418 }
2419 for (i = 0; i < track->num_texture; i++) {
2420 track->textures[i].compress_format = R100_TRACK_COMP_NONE0;
2421 track->textures[i].pitch = 16536;
2422 track->textures[i].width = 16536;
2423 track->textures[i].height = 16536;
2424 track->textures[i].width_11 = 1 << 11;
2425 track->textures[i].height_11 = 1 << 11;
2426 track->textures[i].num_levels = 12;
2427 if (rdev->family <= CHIP_RS200) {
2428 track->textures[i].tex_coord_type = 0;
2429 track->textures[i].txdepth = 0;
2430 } else {
2431 track->textures[i].txdepth = 16;
2432 track->textures[i].tex_coord_type = 1;
2433 }
2434 track->textures[i].cpp = 64;
2435 track->textures[i].robj = NULL((void *)0);
2436 /* CS IB emission code makes sure texture units are disabled */
2437 track->textures[i].enabled = false0;
2438 track->textures[i].lookup_disable = false0;
2439 track->textures[i].roundup_w = true1;
2440 track->textures[i].roundup_h = true1;
2441 if (track->separate_cube)
2442 for (face = 0; face < 5; face++) {
2443 track->textures[i].cube_info[face].robj = NULL((void *)0);
2444 track->textures[i].cube_info[face].width = 16536;
2445 track->textures[i].cube_info[face].height = 16536;
2446 track->textures[i].cube_info[face].offset = 0;
2447 }
2448 }
2449}
2450
2451/*
2452 * Global GPU functions
2453 */
2454static void r100_errata(struct radeon_device *rdev)
2455{
2456 rdev->pll_errata = 0;
2457
2458 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
45
Assuming field 'family' is not equal to CHIP_RV200
46
Assuming field 'family' is not equal to CHIP_RS200
47
Taking false branch
2459 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
2460 }
2461
2462 if (rdev->family == CHIP_RV100 ||
48
Assuming field 'family' is not equal to CHIP_RV100
50
Taking false branch
2463 rdev->family == CHIP_RS100 ||
49
Assuming field 'family' is not equal to CHIP_RS100
2464 rdev->family
49.1
Field 'family' is not equal to CHIP_RS200
== CHIP_RS200) {
2465 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
2466 }
2467}
51
Returning without writing to 'rdev->me_fw'
2468
2469static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
2470{
2471 unsigned i;
2472 uint32_t tmp;
2473
2474 for (i = 0; i < rdev->usec_timeout; i++) {
2475 tmp = RREG32(RADEON_RBBM_STATUS)r100_mm_rreg(rdev, (0x0e40), 0) & RADEON_RBBM_FIFOCNT_MASK0x007f;
2476 if (tmp >= n) {
2477 return 0;
2478 }
2479 udelay(1);
2480 }
2481 return -1;
2482}
2483
2484int r100_gui_wait_for_idle(struct radeon_device *rdev)
2485{
2486 unsigned i;
2487 uint32_t tmp;
2488
2489 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
2490 pr_warn("radeon: wait for empty RBBM fifo failed! Bad things might happen.\n")printk("\0014" "radeon: wait for empty RBBM fifo failed! Bad things might happen.\n"
)
;
2491 }
2492 for (i = 0; i < rdev->usec_timeout; i++) {
2493 tmp = RREG32(RADEON_RBBM_STATUS)r100_mm_rreg(rdev, (0x0e40), 0);
2494 if (!(tmp & RADEON_RBBM_ACTIVE(1 << 31))) {
2495 return 0;
2496 }
2497 udelay(1);
2498 }
2499 return -1;
2500}
2501
2502int r100_mc_wait_for_idle(struct radeon_device *rdev)
2503{
2504 unsigned i;
2505 uint32_t tmp;
2506
2507 for (i = 0; i < rdev->usec_timeout; i++) {
2508 /* read MC_STATUS */
2509 tmp = RREG32(RADEON_MC_STATUS)r100_mm_rreg(rdev, (0x0150), 0);
2510 if (tmp & RADEON_MC_IDLE(1 << 2)) {
2511 return 0;
2512 }
2513 udelay(1);
2514 }
2515 return -1;
2516}
2517
2518bool_Bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2519{
2520 u32 rbbm_status;
2521
2522 rbbm_status = RREG32(R_000E40_RBBM_STATUS)r100_mm_rreg(rdev, (0x000E40), 0);
2523 if (!G_000E40_GUI_ACTIVE(rbbm_status)(((rbbm_status) >> 31) & 0x1)) {
2524 radeon_ring_lockup_update(rdev, ring);
2525 return false0;
2526 }
2527 return radeon_ring_test_lockup(rdev, ring);
2528}
2529
2530/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
2531void r100_enable_bm(struct radeon_device *rdev)
2532{
2533 uint32_t tmp;
2534 /* Enable bus mastering */
2535 tmp = RREG32(RADEON_BUS_CNTL)r100_mm_rreg(rdev, (0x0030), 0) & ~RADEON_BUS_MASTER_DIS(1 << 6);
238
Calling 'r100_mm_rreg'
242
Returning from 'r100_mm_rreg'
2536 WREG32(RADEON_BUS_CNTL, tmp)r100_mm_wreg(rdev, (0x0030), (tmp), 0);
243
Calling 'r100_mm_wreg'
246
Returning from 'r100_mm_wreg'
2537}
247
Returning without writing to 'rdev->me_fw'
2538
2539void r100_bm_disable(struct radeon_device *rdev)
2540{
2541 u32 tmp;
2542
2543 /* disable bus mastering */
2544 tmp = RREG32(R_000030_BUS_CNTL)r100_mm_rreg(rdev, (0x000030), 0);
2545 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044)r100_mm_wreg(rdev, (0x000030), ((tmp & 0xFFFFFFFF) | 0x00000044
), 0)
;
2546 mdelay(1);
2547 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042)r100_mm_wreg(rdev, (0x000030), ((tmp & 0xFFFFFFFF) | 0x00000042
), 0)
;
2548 mdelay(1);
2549 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040)r100_mm_wreg(rdev, (0x000030), ((tmp & 0xFFFFFFFF) | 0x00000040
), 0)
;
2550 tmp = RREG32(RADEON_BUS_CNTL)r100_mm_rreg(rdev, (0x0030), 0);
2551 mdelay(1);
2552 pci_clear_master(rdev->pdev);
2553 mdelay(1);
2554}
2555
2556int r100_asic_reset(struct radeon_device *rdev, bool_Bool hard)
2557{
2558 struct r100_mc_save save;
2559 u32 status, tmp;
2560 int ret = 0;
2561
2562 status = RREG32(R_000E40_RBBM_STATUS)r100_mm_rreg(rdev, (0x000E40), 0);
2563 if (!G_000E40_GUI_ACTIVE(status)(((status) >> 31) & 0x1)) {
2564 return 0;
2565 }
2566 r100_mc_stop(rdev, &save);
2567 status = RREG32(R_000E40_RBBM_STATUS)r100_mm_rreg(rdev, (0x000E40), 0);
2568 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status)do { } while(0);
2569 /* stop CP */
2570 WREG32(RADEON_CP_CSQ_CNTL, 0)r100_mm_wreg(rdev, (0x0740), (0), 0);
2571 tmp = RREG32(RADEON_CP_RB_CNTL)r100_mm_rreg(rdev, (0x0704), 0);
2572 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA)r100_mm_wreg(rdev, (0x0704), (tmp | (1 << 31)), 0);
2573 WREG32(RADEON_CP_RB_RPTR_WR, 0)r100_mm_wreg(rdev, (0x071c), (0), 0);
2574 WREG32(RADEON_CP_RB_WPTR, 0)r100_mm_wreg(rdev, (0x0714), (0), 0);
2575 WREG32(RADEON_CP_RB_CNTL, tmp)r100_mm_wreg(rdev, (0x0704), (tmp), 0);
2576 /* save PCI state */
2577 pci_save_state(rdev->pdev);
2578 /* disable bus mastering */
2579 r100_bm_disable(rdev);
2580 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |r100_mm_wreg(rdev, (0x0000F0), ((((1) & 0x1) << 2) |
(((1) & 0x1) << 3) | (((1) & 0x1) << 4) |
(((1) & 0x1) << 6)), 0)
2581 S_0000F0_SOFT_RESET_RE(1) |r100_mm_wreg(rdev, (0x0000F0), ((((1) & 0x1) << 2) |
(((1) & 0x1) << 3) | (((1) & 0x1) << 4) |
(((1) & 0x1) << 6)), 0)
2582 S_0000F0_SOFT_RESET_PP(1) |r100_mm_wreg(rdev, (0x0000F0), ((((1) & 0x1) << 2) |
(((1) & 0x1) << 3) | (((1) & 0x1) << 4) |
(((1) & 0x1) << 6)), 0)
2583 S_0000F0_SOFT_RESET_RB(1))r100_mm_wreg(rdev, (0x0000F0), ((((1) & 0x1) << 2) |
(((1) & 0x1) << 3) | (((1) & 0x1) << 4) |
(((1) & 0x1) << 6)), 0)
;
2584 RREG32(R_0000F0_RBBM_SOFT_RESET)r100_mm_rreg(rdev, (0x0000F0), 0);
2585 mdelay(500);
2586 WREG32(R_0000F0_RBBM_SOFT_RESET, 0)r100_mm_wreg(rdev, (0x0000F0), (0), 0);
2587 mdelay(1);
2588 status = RREG32(R_000E40_RBBM_STATUS)r100_mm_rreg(rdev, (0x000E40), 0);
2589 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status)do { } while(0);
2590 /* reset CP */
2591 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1))r100_mm_wreg(rdev, (0x0000F0), ((((1) & 0x1) << 0))
, 0)
;
2592 RREG32(R_0000F0_RBBM_SOFT_RESET)r100_mm_rreg(rdev, (0x0000F0), 0);
2593 mdelay(500);
2594 WREG32(R_0000F0_RBBM_SOFT_RESET, 0)r100_mm_wreg(rdev, (0x0000F0), (0), 0);
2595 mdelay(1);
2596 status = RREG32(R_000E40_RBBM_STATUS)r100_mm_rreg(rdev, (0x000E40), 0);
2597 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status)do { } while(0);
2598 /* restore PCI & busmastering */
2599 pci_restore_state(rdev->pdev);
2600 r100_enable_bm(rdev);
2601 /* Check if GPU is idle */
2602 if (G_000E40_SE_BUSY(status)(((status) >> 20) & 0x1) || G_000E40_RE_BUSY(status)(((status) >> 21) & 0x1) ||
2603 G_000E40_TAM_BUSY(status)(((status) >> 22) & 0x1) || G_000E40_PB_BUSY(status)(((status) >> 24) & 0x1)) {
2604 dev_err(rdev->dev, "failed to reset GPU\n")printf("drm:pid%d:%s *ERROR* " "failed to reset GPU\n", ({struct
cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci
) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;
})->ci_curproc->p_p->ps_pid, __func__)
;
2605 ret = -1;
2606 } else
2607 dev_info(rdev->dev, "GPU reset succeed\n")do { } while(0);
2608 r100_mc_resume(rdev, &save);
2609 return ret;
2610}
2611
2612void r100_set_common_regs(struct radeon_device *rdev)
2613{
2614 struct drm_device *dev = rdev->ddev;
2615 bool_Bool force_dac2 = false0;
2616 u32 tmp;
2617
2618 /* set these so they don't interfere with anything */
2619 WREG32(RADEON_OV0_SCALE_CNTL, 0)r100_mm_wreg(rdev, (0x0420), (0), 0);
121
Calling 'r100_mm_wreg'
125
Returning from 'r100_mm_wreg'
2620 WREG32(RADEON_SUBPIC_CNTL, 0)r100_mm_wreg(rdev, (0x0540), (0), 0);
126
Calling 'r100_mm_wreg'
130
Returning from 'r100_mm_wreg'
2621 WREG32(RADEON_VIPH_CONTROL, 0)r100_mm_wreg(rdev, (0x0c40), (0), 0);
131
Calling 'r100_mm_wreg'
135
Returning from 'r100_mm_wreg'
2622 WREG32(RADEON_I2C_CNTL_1, 0)r100_mm_wreg(rdev, (0x0094), (0), 0);
136
Calling 'r100_mm_wreg'
139
Returning from 'r100_mm_wreg'
2623 WREG32(RADEON_DVI_I2C_CNTL_1, 0)r100_mm_wreg(rdev, (0x02e4), (0), 0);
140
Calling 'r100_mm_wreg'
143
Returning from 'r100_mm_wreg'
2624 WREG32(RADEON_CAP0_TRIG_CNTL, 0)r100_mm_wreg(rdev, (0x0950), (0), 0);
144
Calling 'r100_mm_wreg'
147
Returning from 'r100_mm_wreg'
2625 WREG32(RADEON_CAP1_TRIG_CNTL, 0)r100_mm_wreg(rdev, (0x09C0), (0), 0);
2626
2627 /* always set up dac2 on rn50 and some rv100 as lots
2628 * of servers seem to wire it up to a VGA port but
2629 * don't report it in the bios connector
2630 * table.
2631 */
2632 switch (dev->pdev->device) {
148
'Default' branch taken. Execution continues on line 2657
2633 /* RN50 */
2634 case 0x515e:
2635 case 0x5969:
2636 force_dac2 = true1;
2637 break;
2638 /* RV100*/
2639 case 0x5159:
2640 case 0x515a:
2641 /* DELL triple head servers */
2642 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
2643 ((dev->pdev->subsystem_device == 0x016c) ||
2644 (dev->pdev->subsystem_device == 0x016d) ||
2645 (dev->pdev->subsystem_device == 0x016e) ||
2646 (dev->pdev->subsystem_device == 0x016f) ||
2647 (dev->pdev->subsystem_device == 0x0170) ||
2648 (dev->pdev->subsystem_device == 0x017d) ||
2649 (dev->pdev->subsystem_device == 0x017e) ||
2650 (dev->pdev->subsystem_device == 0x0183) ||
2651 (dev->pdev->subsystem_device == 0x018a) ||
2652 (dev->pdev->subsystem_device == 0x019a)))
2653 force_dac2 = true1;
2654 break;
2655 }
2656
2657 if (force_dac2
148.1
'force_dac2' is false
) {
149
Taking false branch
2658 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG)r100_mm_rreg(rdev, (0x0d14), 0);
2659 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL)r100_mm_rreg(rdev, (0x088c), 0);
2660 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2)r100_mm_rreg(rdev, (0x007c), 0);
2661
2662 /* For CRT on DAC2, don't turn it on if BIOS didn't
 2663 enable it, even if it's detected.
2664 */
2665
2666 /* force it to crtc0 */
2667 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL(1 << 0);
2668 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL(1 << 1);
2669 disp_hw_debug |= RADEON_CRT2_DISP1_SEL(1 << 5);
2670
2671 /* set up the TV DAC */
2672 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL(1 << 2) |
2673 RADEON_TV_DAC_STD_MASK(3 << 8) |
2674 RADEON_TV_DAC_RDACPD(1 << 24) |
2675 RADEON_TV_DAC_GDACPD(1 << 25) |
2676 RADEON_TV_DAC_BDACPD(1 << 26) |
2677 RADEON_TV_DAC_BGADJ_MASK(0xf << 16) |
2678 RADEON_TV_DAC_DACADJ_MASK(0xf << 20));
2679 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK(1 << 0) |
2680 RADEON_TV_DAC_NHOLD(1 << 1) |
2681 RADEON_TV_DAC_STD_PS2(2 << 8) |
2682 (0x58 << 16));
2683
2684 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl)r100_mm_wreg(rdev, (0x088c), (tv_dac_cntl), 0);
2685 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug)r100_mm_wreg(rdev, (0x0d14), (disp_hw_debug), 0);
2686 WREG32(RADEON_DAC_CNTL2, dac2_cntl)r100_mm_wreg(rdev, (0x007c), (dac2_cntl), 0);
2687 }
2688
2689 /* switch PM block to ACPI mode */
2690 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL)rdev->pll_rreg(rdev, (0x0015));
2691 tmp &= ~RADEON_PM_MODE_SEL(1 << 13);
2692 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp)rdev->pll_wreg(rdev, (0x0015), (tmp));
2693
2694}
2695
2696/*
2697 * VRAM info
2698 */
2699static void r100_vram_get_type(struct radeon_device *rdev)
2700{
2701 uint32_t tmp;
2702
2703 rdev->mc.vram_is_ddr = false0;
2704 if (rdev->flags & RADEON_IS_IGP)
57
Assuming the condition is false
58
Taking false branch
2705 rdev->mc.vram_is_ddr = true1;
2706 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG)r100_mm_rreg(rdev, (0x0158), 0) & RADEON_MEM_CFG_TYPE_DDR(1 << 30))
59
Calling 'r100_mm_rreg'
63
Returning from 'r100_mm_rreg'
64
Assuming the condition is false
65
Taking false branch
2707 rdev->mc.vram_is_ddr = true1;
2708 if ((rdev->family
65.1
Field 'family' is not equal to CHIP_RV100
== CHIP_RV100) ||
66
Taking false branch
2709 (rdev->family
65.2
Field 'family' is not equal to CHIP_RS100
== CHIP_RS100) ||
2710 (rdev->family
65.3
Field 'family' is not equal to CHIP_RS200
== CHIP_RS200)) {
2711 tmp = RREG32(RADEON_MEM_CNTL)r100_mm_rreg(rdev, (0x0140), 0);
2712 if (tmp & RV100_HALF_MODE(1 << 3)) {
2713 rdev->mc.vram_width = 32;
2714 } else {
2715 rdev->mc.vram_width = 64;
2716 }
2717 if (rdev->flags & RADEON_SINGLE_CRTC) {
2718 rdev->mc.vram_width /= 4;
2719 rdev->mc.vram_is_ddr = true1;
2720 }
2721 } else if (rdev->family <= CHIP_RV280) {
67
Assuming field 'family' is > CHIP_RV280
68
Taking false branch
2722 tmp = RREG32(RADEON_MEM_CNTL)r100_mm_rreg(rdev, (0x0140), 0);
2723 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK0x01) {
2724 rdev->mc.vram_width = 128;
2725 } else {
2726 rdev->mc.vram_width = 64;
2727 }
2728 } else {
2729 /* newer IGPs */
2730 rdev->mc.vram_width = 128;
2731 }
2732}
69
Returning without writing to 'rdev->me_fw'
2733
2734static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2735{
2736 u32 aper_size;
2737 u8 byte;
2738
2739 aper_size = RREG32(RADEON_CONFIG_APER_SIZE)r100_mm_rreg(rdev, (0x0108), 0);
73
Calling 'r100_mm_rreg'
77
Returning from 'r100_mm_rreg'
2740
2741 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
 2742 * that is, have the 2nd generation multifunction PCI interface
2743 */
2744 if (rdev->family
77.1
Field 'family' is not equal to CHIP_RV280
== CHIP_RV280 ||
79
Taking false branch
2745 rdev->family >= CHIP_RV350) {
78
Assuming field 'family' is < CHIP_RV350
2746 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x0130), 0); tmp_ &=
(~(1 << 23)); tmp_ |= (((1 << 23)) & ~(~(1 <<
23))); r100_mm_wreg(rdev, (0x0130), (tmp_), 0); } while (0)
2747 ~RADEON_HDP_APER_CNTL)do { uint32_t tmp_ = r100_mm_rreg(rdev, (0x0130), 0); tmp_ &=
(~(1 << 23)); tmp_ |= (((1 << 23)) & ~(~(1 <<
23))); r100_mm_wreg(rdev, (0x0130), (tmp_), 0); } while (0)
;
2748 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n")printk("\0016" "[" "drm" "] " "Generation 2 PCI interface, using max accessible memory\n"
)
;
2749 return aper_size * 2;
2750 }
2751
2752 /* Older cards have all sorts of funny issues to deal with. First
2753 * check if it's a multifunction card by reading the PCI config
2754 * header type... Limit those to one aperture size
2755 */
2756 pci_read_config_byte(rdev->pdev, 0xe, &byte);
2757 if (byte & 0x80) {
80
Assuming the condition is false
81
Taking false branch
2758 DRM_INFO("Generation 1 PCI interface in multifunction mode\n")printk("\0016" "[" "drm" "] " "Generation 1 PCI interface in multifunction mode\n"
)
;
2759 DRM_INFO("Limiting VRAM to one aperture\n")printk("\0016" "[" "drm" "] " "Limiting VRAM to one aperture\n"
)
;
2760 return aper_size;
2761 }
2762
2763 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
 2764 * has set it up. We don't write this as it's broken on some ASICs, but
2765 * we expect the BIOS to have done the right thing (might be too optimistic...)
2766 */
2767 if (RREG32(RADEON_HOST_PATH_CNTL)r100_mm_rreg(rdev, (0x0130), 0) & RADEON_HDP_APER_CNTL(1 << 23))
82
Calling 'r100_mm_rreg'
85
Returning from 'r100_mm_rreg'
86
Assuming the condition is false
87
Taking false branch
2768 return aper_size * 2;
2769 return aper_size;
88
Returning without writing to 'rdev->me_fw'
2770}
2771
2772void r100_vram_init_sizes(struct radeon_device *rdev)
2773{
2774 u64 config_aper_size;
2775
2776 /* work out accessible VRAM */
2777 rdev->mc.aper_base = rdev->fb_aper_offset;
2778 rdev->mc.aper_size = rdev->fb_aper_size;
2779 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
72
Calling 'r100_get_accessible_vram'
89
Returning from 'r100_get_accessible_vram'
2780 /* FIXME we don't use the second aperture yet when we could use it */
2781 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
90
Assuming field 'visible_vram_size' is <= field 'aper_size'
91
Taking false branch
2782 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2783 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE)r100_mm_rreg(rdev, (0x0108), 0);
92
Calling 'r100_mm_rreg'
95
Returning from 'r100_mm_rreg'
2784 if (rdev->flags & RADEON_IS_IGP) {
96
Taking false branch
2785 uint32_t tom;
2786 /* read NB_TOM to get the amount of ram stolen for the GPU */
2787 tom = RREG32(RADEON_NB_TOM)r100_mm_rreg(rdev, (0x15c), 0);
2788 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
2789 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size)r100_mm_wreg(rdev, (0x00f8), (rdev->mc.real_vram_size), 0);
2790 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2791 } else {
2792 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE)r100_mm_rreg(rdev, (0x00f8), 0);
97
Calling 'r100_mm_rreg'
101
Returning from 'r100_mm_rreg'
2793 /* Some production boards of m6 will report 0
2794 * if it's 8 MB
2795 */
2796 if (rdev->mc.real_vram_size == 0) {
102
Assuming field 'real_vram_size' is not equal to 0
103
Taking false branch
2797 rdev->mc.real_vram_size = 8192 * 1024;
2798 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size)r100_mm_wreg(rdev, (0x00f8), (rdev->mc.real_vram_size), 0);
2799 }
2800 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
 2801 * Novell bug 204882, along with lots of Ubuntu ones
2802 */
2803 if (rdev->mc.aper_size > config_aper_size)
104
Assuming 'config_aper_size' is >= field 'aper_size'
105
Taking false branch
2804 config_aper_size = rdev->mc.aper_size;
2805
2806 if (config_aper_size > rdev->mc.real_vram_size)
106
Assuming 'config_aper_size' is > field 'real_vram_size'
107
Taking true branch
2807 rdev->mc.mc_vram_size = config_aper_size;
2808 else
2809 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2810 }
2811}
2812
2813void r100_vga_set_state(struct radeon_device *rdev, bool_Bool state)
2814{
2815 uint32_t temp;
2816
2817 temp = RREG32(RADEON_CONFIG_CNTL)r100_mm_rreg(rdev, (0x00e0), 0);
2818 if (!state) {
2819 temp &= ~RADEON_CFG_VGA_RAM_EN(1 << 8);
2820 temp |= RADEON_CFG_VGA_IO_DIS(1 << 9);
2821 } else {
2822 temp &= ~RADEON_CFG_VGA_IO_DIS(1 << 9);
2823 }
2824 WREG32(RADEON_CONFIG_CNTL, temp)r100_mm_wreg(rdev, (0x00e0), (temp), 0);
2825}
2826
2827static void r100_mc_init(struct radeon_device *rdev)
2828{
2829 u64 base;
2830
2831 r100_vram_get_type(rdev);
56
Calling 'r100_vram_get_type'
70
Returning from 'r100_vram_get_type'
2832 r100_vram_init_sizes(rdev);
71
Calling 'r100_vram_init_sizes'
108
Returning from 'r100_vram_init_sizes'
2833 base = rdev->mc.aper_base;
2834 if (rdev->flags & RADEON_IS_IGP)
109
Taking false branch
2835 base = (RREG32(RADEON_NB_TOM)r100_mm_rreg(rdev, (0x15c), 0) & 0xffff) << 16;
2836 radeon_vram_location(rdev, &rdev->mc, base);
2837 rdev->mc.gtt_base_align = 0;
2838 if (!(rdev->flags & RADEON_IS_AGP))
110
Assuming the condition is false
111
Taking false branch
2839 radeon_gtt_location(rdev, &rdev->mc);
2840 radeon_update_bandwidth_info(rdev);
2841}
2842
2843
2844/*
2845 * Indirect registers accessor
2846 */
2847void r100_pll_errata_after_index(struct radeon_device *rdev)
2848{
2849 if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2850 (void)RREG32(RADEON_CLOCK_CNTL_DATA)r100_mm_rreg(rdev, (0x000c), 0);
2851 (void)RREG32(RADEON_CRTC_GEN_CNTL)r100_mm_rreg(rdev, (0x0050), 0);
2852 }
2853}
2854
2855static void r100_pll_errata_after_data(struct radeon_device *rdev)
2856{
 2857 /* This workaround is necessary on RV100, RS100 and RS200 chips
2858 * or the chip could hang on a subsequent access
2859 */
2860 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2861 mdelay(5);
2862 }
2863
 2864 /* This function is required to work around a hardware bug in some (all?)
2865 * revisions of the R300. This workaround should be called after every
2866 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
2867 * may not be correct.
2868 */
2869 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2870 uint32_t save, tmp;
2871
2872 save = RREG32(RADEON_CLOCK_CNTL_INDEX)r100_mm_rreg(rdev, (0x0008), 0);
2873 tmp = save & ~(0x3f | RADEON_PLL_WR_EN(1 << 7));
2874 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp)r100_mm_wreg(rdev, (0x0008), (tmp), 0);
2875 tmp = RREG32(RADEON_CLOCK_CNTL_DATA)r100_mm_rreg(rdev, (0x000c), 0);
2876 WREG32(RADEON_CLOCK_CNTL_INDEX, save)r100_mm_wreg(rdev, (0x0008), (save), 0);
2877 }
2878}
2879
2880uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2881{
2882 unsigned long flags;
2883 uint32_t data;
2884
2885 spin_lock_irqsave(&rdev->pll_idx_lock, flags)do { flags = 0; mtx_enter(&rdev->pll_idx_lock); } while
(0)
;
2886 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f)iowrite8(reg & 0x3f, (rdev->rmmio) + (0x0008));
2887 r100_pll_errata_after_index(rdev);
2888 data = RREG32(RADEON_CLOCK_CNTL_DATA)r100_mm_rreg(rdev, (0x000c), 0);
2889 r100_pll_errata_after_data(rdev);
2890 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags)do { (void)(flags); mtx_leave(&rdev->pll_idx_lock); } while
(0)
;
2891 return data;
2892}
2893
2894void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2895{
2896 unsigned long flags;
2897
2898 spin_lock_irqsave(&rdev->pll_idx_lock, flags)do { flags = 0; mtx_enter(&rdev->pll_idx_lock); } while
(0)
;
2899 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN))iowrite8(((reg & 0x3f) | (1 << 7)), (rdev->rmmio
) + (0x0008))
;
2900 r100_pll_errata_after_index(rdev);
2901 WREG32(RADEON_CLOCK_CNTL_DATA, v)r100_mm_wreg(rdev, (0x000c), (v), 0);
2902 r100_pll_errata_after_data(rdev);
2903 spin_unlock_irqrestore(&rdev->pll_idx_lock, flags)do { (void)(flags); mtx_leave(&rdev->pll_idx_lock); } while
(0)
;
2904}
2905
2906static void r100_set_safe_registers(struct radeon_device *rdev)
2907{
2908 if (ASIC_IS_RN50(rdev)((rdev->pdev->device == 0x515e) || (rdev->pdev->device
== 0x5969))
) {
2909 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2910 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm)(sizeof((rn50_reg_safe_bm)) / sizeof((rn50_reg_safe_bm)[0]));
2911 } else if (rdev->family < CHIP_R200) {
2912 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2913 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm)(sizeof((r100_reg_safe_bm)) / sizeof((r100_reg_safe_bm)[0]));
2914 } else {
2915 r200_set_safe_registers(rdev);
2916 }
2917}
2918
2919/*
2920 * Debugfs info
2921 */
2922#if defined(CONFIG_DEBUG_FS)
2923static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2924{
2925 struct drm_info_node *node = (struct drm_info_node *) m->private;
2926 struct drm_device *dev = node->minor->dev;
2927 struct radeon_device *rdev = dev->dev_private;
2928 uint32_t reg, value;
2929 unsigned i;
2930
2931 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS)r100_mm_rreg(rdev, (0x0e40), 0));
2932 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C)r100_mm_rreg(rdev, (0xE7C), 0));
2933 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)r100_mm_rreg(rdev, (0x7C0), 0));
2934 for (i = 0; i < 64; i++) {
2935 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100)r100_mm_wreg(rdev, (0xE70), (i | 0x100), 0);
2936 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA)r100_mm_rreg(rdev, (0xE74), 0) - 1) >> 2;
2937 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i)r100_mm_wreg(rdev, (0xE70), (i), 0);
2938 value = RREG32(RADEON_RBBM_CMDFIFO_DATA)r100_mm_rreg(rdev, (0xE74), 0);
2939 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2940 }
2941 return 0;
2942}
2943
2944static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2945{
2946 struct drm_info_node *node = (struct drm_info_node *) m->private;
2947 struct drm_device *dev = node->minor->dev;
2948 struct radeon_device *rdev = dev->dev_private;
2949 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0];
2950 uint32_t rdp, wdp;
2951 unsigned count, i, j;
2952
2953 radeon_ring_free_size(rdev, ring);
2954 rdp = RREG32(RADEON_CP_RB_RPTR)r100_mm_rreg(rdev, (0x0710), 0);
2955 wdp = RREG32(RADEON_CP_RB_WPTR)r100_mm_rreg(rdev, (0x0714), 0);
2956 count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
2957 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)r100_mm_rreg(rdev, (0x7C0), 0));
2958 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2959 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2960 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
2961 seq_printf(m, "%u dwords in ring\n", count);
2962 if (ring->ready) {
2963 for (j = 0; j <= count; j++) {
2964 i = (rdp + j) & ring->ptr_mask;
2965 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
2966 }
2967 }
2968 return 0;
2969}
2970
2971
2972static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2973{
2974 struct drm_info_node *node = (struct drm_info_node *) m->private;
2975 struct drm_device *dev = node->minor->dev;
2976 struct radeon_device *rdev = dev->dev_private;
2977 uint32_t csq_stat, csq2_stat, tmp;
2978 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2979 unsigned i;
2980
2981 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)r100_mm_rreg(rdev, (0x7C0), 0));
2982 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE)r100_mm_rreg(rdev, (0x0744), 0));
2983 csq_stat = RREG32(RADEON_CP_CSQ_STAT)r100_mm_rreg(rdev, (0x07f8), 0);
2984 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT)r100_mm_rreg(rdev, (0x07fc), 0);
2985 r_rptr = (csq_stat >> 0) & 0x3ff;
2986 r_wptr = (csq_stat >> 10) & 0x3ff;
2987 ib1_rptr = (csq_stat >> 20) & 0x3ff;
2988 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2989 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2990 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2991 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2992 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2993 seq_printf(m, "Ring rptr %u\n", r_rptr);
2994 seq_printf(m, "Ring wptr %u\n", r_wptr);
2995 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2996 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2997 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2998 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
 2999 /* FIXME: 0, 128, 640 depend on fifo setup; see cp_init_kms
3000 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
3001 seq_printf(m, "Ring fifo:\n");
3002 for (i = 0; i < 256; i++) {
3003 WREG32(RADEON_CP_CSQ_ADDR, i << 2)r100_mm_wreg(rdev, (0x07f0), (i << 2), 0);
3004 tmp = RREG32(RADEON_CP_CSQ_DATA)r100_mm_rreg(rdev, (0x07f4), 0);
3005 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
3006 }
3007 seq_printf(m, "Indirect1 fifo:\n");
3008 for (i = 256; i <= 512; i++) {
3009 WREG32(RADEON_CP_CSQ_ADDR, i << 2)r100_mm_wreg(rdev, (0x07f0), (i << 2), 0);
3010 tmp = RREG32(RADEON_CP_CSQ_DATA)r100_mm_rreg(rdev, (0x07f4), 0);
3011 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
3012 }
3013 seq_printf(m, "Indirect2 fifo:\n");
3014 for (i = 640; i < ib1_wptr; i++) {
3015 WREG32(RADEON_CP_CSQ_ADDR, i << 2)r100_mm_wreg(rdev, (0x07f0), (i << 2), 0);
3016 tmp = RREG32(RADEON_CP_CSQ_DATA)r100_mm_rreg(rdev, (0x07f4), 0);
3017 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
3018 }
3019 return 0;
3020}
3021
3022static int r100_debugfs_mc_info(struct seq_file *m, void *data)
3023{
3024 struct drm_info_node *node = (struct drm_info_node *) m->private;
3025 struct drm_device *dev = node->minor->dev;
3026 struct radeon_device *rdev = dev->dev_private;
3027 uint32_t tmp;
3028
3029 tmp = RREG32(RADEON_CONFIG_MEMSIZE)r100_mm_rreg(rdev, (0x00f8), 0);
3030 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
3031 tmp = RREG32(RADEON_MC_FB_LOCATION)r100_mm_rreg(rdev, (0x0148), 0);
3032 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
3033 tmp = RREG32(RADEON_BUS_CNTL)r100_mm_rreg(rdev, (0x0030), 0);
3034 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
3035 tmp = RREG32(RADEON_MC_AGP_LOCATION)r100_mm_rreg(rdev, (0x014c), 0);
3036 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
3037 tmp = RREG32(RADEON_AGP_BASE)r100_mm_rreg(rdev, (0x0170), 0);
3038 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
3039 tmp = RREG32(RADEON_HOST_PATH_CNTL)r100_mm_rreg(rdev, (0x0130), 0);
3040 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
3041 tmp = RREG32(0x01D0)r100_mm_rreg(rdev, (0x01D0), 0);
3042 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
3043 tmp = RREG32(RADEON_AIC_LO_ADDR)r100_mm_rreg(rdev, (0x01dc), 0);
3044 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
3045 tmp = RREG32(RADEON_AIC_HI_ADDR)r100_mm_rreg(rdev, (0x01e0), 0);
3046 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
3047 tmp = RREG32(0x01E4)r100_mm_rreg(rdev, (0x01E4), 0);
3048 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
3049 return 0;
3050}
3051
3052static struct drm_info_list r100_debugfs_rbbm_list[] = {
3053 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL((void *)0)},
3054};
3055
3056static struct drm_info_list r100_debugfs_cp_list[] = {
3057 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL((void *)0)},
3058 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL((void *)0)},
3059};
3060
3061static struct drm_info_list r100_debugfs_mc_info_list[] = {
3062 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL((void *)0)},
3063};
3064#endif
3065
3066int r100_debugfs_rbbm_init(struct radeon_device *rdev)
3067{
3068#if defined(CONFIG_DEBUG_FS)
3069 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
3070#else
3071 return 0;
3072#endif
3073}
3074
3075int r100_debugfs_cp_init(struct radeon_device *rdev)
3076{
3077#if defined(CONFIG_DEBUG_FS)
3078 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
3079#else
3080 return 0;
276
Returning without writing to 'rdev->me_fw'
3081#endif
3082}
3083
3084int r100_debugfs_mc_info_init(struct radeon_device *rdev)
3085{
3086#if defined(CONFIG_DEBUG_FS)
3087 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
3088#else
3089 return 0;
3
Returning without writing to 'rdev->me_fw'
3090#endif
3091}
3092
3093int r100_set_surface_reg(struct radeon_device *rdev, int reg,
3094 uint32_t tiling_flags, uint32_t pitch,
3095 uint32_t offset, uint32_t obj_size)
3096{
3097 int surf_index = reg * 16;
3098 int flags = 0;
3099
3100 if (rdev->family <= CHIP_RS200) {
3101 if ((tiling_flags & (RADEON_TILING_MACRO0x1|RADEON_TILING_MICRO0x2))
3102 == (RADEON_TILING_MACRO0x1|RADEON_TILING_MICRO0x2))
3103 flags |= RADEON_SURF_TILE_COLOR_BOTH(1 << 16);
3104 if (tiling_flags & RADEON_TILING_MACRO0x1)
3105 flags |= RADEON_SURF_TILE_COLOR_MACRO(0 << 16);
3106 /* setting pitch to 0 disables tiling */
3107 if ((tiling_flags & (RADEON_TILING_MACRO0x1|RADEON_TILING_MICRO0x2))
3108 == 0)
3109 pitch = 0;
3110 } else if (rdev->family <= CHIP_RV280) {
3111 if (tiling_flags & (RADEON_TILING_MACRO0x1))
3112 flags |= R200_SURF_TILE_COLOR_MACRO(1 << 16);
3113 if (tiling_flags & RADEON_TILING_MICRO0x2)
3114 flags |= R200_SURF_TILE_COLOR_MICRO(2 << 16);
3115 } else {
3116 if (tiling_flags & RADEON_TILING_MACRO0x1)
3117 flags |= R300_SURF_TILE_MACRO(1<<16);
3118 if (tiling_flags & RADEON_TILING_MICRO0x2)
3119 flags |= R300_SURF_TILE_MICRO(2<<16);
3120 }
3121
3122 if (tiling_flags & RADEON_TILING_SWAP_16BIT0x4)
3123 flags |= RADEON_SURF_AP0_SWP_16BPP(1 << 20) | RADEON_SURF_AP1_SWP_16BPP(1 << 22);
3124 if (tiling_flags & RADEON_TILING_SWAP_32BIT0x8)
3125 flags |= RADEON_SURF_AP0_SWP_32BPP(1 << 21) | RADEON_SURF_AP1_SWP_32BPP(1 << 23);
3126
3127 /* r100/r200 divide by 16 */
3128 if (rdev->family < CHIP_R300)
3129 flags |= pitch / 16;
3130 else
3131 flags |= pitch / 8;
3132
3133
3134 DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1)__drm_dbg(DRM_UT_KMS, "writing surface %d %d %x %x\n", reg, flags
, offset, offset+obj_size-1)
;
3135 WREG32(RADEON_SURFACE0_INFO + surf_index, flags)r100_mm_wreg(rdev, (0x0b0c + surf_index), (flags), 0);
3136 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset)r100_mm_wreg(rdev, (0x0b04 + surf_index), (offset), 0);
3137 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1)r100_mm_wreg(rdev, (0x0b08 + surf_index), (offset + obj_size -
1), 0)
;
3138 return 0;
3139}
3140
3141void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
3142{
3143 int surf_index = reg * 16;
3144 WREG32(RADEON_SURFACE0_INFO + surf_index, 0)r100_mm_wreg(rdev, (0x0b0c + surf_index), (0), 0);
3145}
3146
3147void r100_bandwidth_update(struct radeon_device *rdev)
3148{
3149 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
3150 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
3151 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
3152 fixed20_12 crit_point_ff = {0};
3153 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
3154 fixed20_12 memtcas_ff[8] = {
3155 dfixed_init(1){ .full = (u32)((((1)) << 12)) },
3156 dfixed_init(2){ .full = (u32)((((2)) << 12)) },
3157 dfixed_init(3){ .full = (u32)((((3)) << 12)) },
3158 dfixed_init(0){ .full = (u32)((((0)) << 12)) },
3159 dfixed_init_half(1){ .full = (u32)((((1)) << 12) + 2048) },
3160 dfixed_init_half(2){ .full = (u32)((((2)) << 12) + 2048) },
3161 dfixed_init(0){ .full = (u32)((((0)) << 12)) },
3162 };
3163 fixed20_12 memtcas_rs480_ff[8] = {
3164 dfixed_init(0){ .full = (u32)((((0)) << 12)) },
3165 dfixed_init(1){ .full = (u32)((((1)) << 12)) },
3166 dfixed_init(2){ .full = (u32)((((2)) << 12)) },
3167 dfixed_init(3){ .full = (u32)((((3)) << 12)) },
3168 dfixed_init(0){ .full = (u32)((((0)) << 12)) },
3169 dfixed_init_half(1){ .full = (u32)((((1)) << 12) + 2048) },
3170 dfixed_init_half(2){ .full = (u32)((((2)) << 12) + 2048) },
3171 dfixed_init_half(3){ .full = (u32)((((3)) << 12) + 2048) },
3172 };
3173 fixed20_12 memtcas2_ff[8] = {
3174 dfixed_init(0){ .full = (u32)((((0)) << 12)) },
3175 dfixed_init(1){ .full = (u32)((((1)) << 12)) },
3176 dfixed_init(2){ .full = (u32)((((2)) << 12)) },
3177 dfixed_init(3){ .full = (u32)((((3)) << 12)) },
3178 dfixed_init(4){ .full = (u32)((((4)) << 12)) },
3179 dfixed_init(5){ .full = (u32)((((5)) << 12)) },
3180 dfixed_init(6){ .full = (u32)((((6)) << 12)) },
3181 dfixed_init(7){ .full = (u32)((((7)) << 12)) },
3182 };
3183 fixed20_12 memtrbs[8] = {
3184 dfixed_init(1){ .full = (u32)((((1)) << 12)) },
3185 dfixed_init_half(1){ .full = (u32)((((1)) << 12) + 2048) },
3186 dfixed_init(2){ .full = (u32)((((2)) << 12)) },
3187 dfixed_init_half(2){ .full = (u32)((((2)) << 12) + 2048) },
3188 dfixed_init(3){ .full = (u32)((((3)) << 12)) },
3189 dfixed_init_half(3){ .full = (u32)((((3)) << 12) + 2048) },
3190 dfixed_init(4){ .full = (u32)((((4)) << 12)) },
3191 dfixed_init_half(4){ .full = (u32)((((4)) << 12) + 2048) }
3192 };
3193 fixed20_12 memtrbs_r4xx[8] = {
3194 dfixed_init(4){ .full = (u32)((((4)) << 12)) },
3195 dfixed_init(5){ .full = (u32)((((5)) << 12)) },
3196 dfixed_init(6){ .full = (u32)((((6)) << 12)) },
3197 dfixed_init(7){ .full = (u32)((((7)) << 12)) },
3198 dfixed_init(8){ .full = (u32)((((8)) << 12)) },
3199 dfixed_init(9){ .full = (u32)((((9)) << 12)) },
3200 dfixed_init(10){ .full = (u32)((((10)) << 12)) },
3201 dfixed_init(11){ .full = (u32)((((11)) << 12)) }
3202 };
3203 fixed20_12 min_mem_eff;
3204 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
3205 fixed20_12 cur_latency_mclk, cur_latency_sclk;
3206 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate = {0},
3207 disp_drain_rate2, read_return_rate;
3208 fixed20_12 time_disp1_drop_priority;
3209 int c;
3210 int cur_size = 16; /* in octawords */
3211 int critical_point = 0, critical_point2;
3212/* uint32_t read_return_rate, time_disp1_drop_priority; */
3213 int stop_req, max_stop_req;
3214 struct drm_display_mode *mode1 = NULL((void *)0);
3215 struct drm_display_mode *mode2 = NULL((void *)0);
3216 uint32_t pixel_bytes1 = 0;
3217 uint32_t pixel_bytes2 = 0;
3218
3219 /* Guess line buffer size to be 8192 pixels */
3220 u32 lb_size = 8192;
3221
3222 if (!rdev->mode_info.mode_config_initialized)
3223 return;
3224
3225 radeon_update_display_priority(rdev);
3226
3227 if (rdev->mode_info.crtcs[0]->base.enabled) {
3228 const struct drm_framebuffer *fb =
3229 rdev->mode_info.crtcs[0]->base.primary->fb;
3230
3231 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
3232 pixel_bytes1 = fb->format->cpp[0];
3233 }
3234 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3235 if (rdev->mode_info.crtcs[1]->base.enabled) {
3236 const struct drm_framebuffer *fb =
3237 rdev->mode_info.crtcs[1]->base.primary->fb;
3238
3239 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
3240 pixel_bytes2 = fb->format->cpp[0];
3241 }
3242 }
3243
3244 min_mem_eff.full = dfixed_const_8(0)(u32)(((0) << 12) + 3277);
3245 /* get modes */
3246 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)((rdev->family == CHIP_R300) || (rdev->family == CHIP_RV350
) || (rdev->family == CHIP_R350) || (rdev->family == CHIP_RV380
) || (rdev->family == CHIP_R420) || (rdev->family == CHIP_R423
) || (rdev->family == CHIP_RV410) || (rdev->family == CHIP_RS400
) || (rdev->family == CHIP_RS480))
) {
3247 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER)r100_mm_rreg(rdev, (0x180), 0);
3248 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK0xf << R300_MC_DISP1R_INIT_LAT_SHIFT12);
3249 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK0xf << R300_MC_DISP0R_INIT_LAT_SHIFT8);
3250 /* check crtc enables */
3251 if (mode2)
3252 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT12);
3253 if (mode1)
3254 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT8);
3255 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer)r100_mm_wreg(rdev, (0x180), (mc_init_misc_lat_timer), 0);
3256 }
3257
3258 /*
 3259 * determine if there is enough bw for the current mode
3260 */
3261 sclk_ff = rdev->pm.sclk;
3262 mclk_ff = rdev->pm.mclk;
3263
3264 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
3265 temp_ff.full = dfixed_const(temp)(u32)(((temp) << 12));
3266 mem_bw.full = dfixed_mul(mclk_ff, temp_ff)((u64)((u64)(mclk_ff).full * (temp_ff).full + 2048) >> 12
)
;
3267
3268 pix_clk.full = 0;
3269 pix_clk2.full = 0;
3270 peak_disp_bw.full = 0;
3271 if (mode1) {
3272 temp_ff.full = dfixed_const(1000)(u32)(((1000) << 12));
3273 pix_clk.full = dfixed_const(mode1->clock)(u32)(((mode1->clock) << 12)); /* convert to fixed point */
3274 pix_clk.full = dfixed_div(pix_clk, temp_ff);
3275 temp_ff.full = dfixed_const(pixel_bytes1)(u32)(((pixel_bytes1) << 12));
3276 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff)((u64)((u64)(pix_clk).full * (temp_ff).full + 2048) >> 12
)
;
3277 }
3278 if (mode2) {
3279 temp_ff.full = dfixed_const(1000)(u32)(((1000) << 12));
3280 pix_clk2.full = dfixed_const(mode2->clock)(u32)(((mode2->clock) << 12)); /* convert to fixed point */
3281 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
3282 temp_ff.full = dfixed_const(pixel_bytes2)(u32)(((pixel_bytes2) << 12));
3283 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff)((u64)((u64)(pix_clk2).full * (temp_ff).full + 2048) >>
12)
;
3284 }
3285
3286 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff)((u64)((u64)(mem_bw).full * (min_mem_eff).full + 2048) >>
12)
;
3287 if (peak_disp_bw.full >= mem_bw.full) {
3288 DRM_ERROR("You may not have enough display bandwidth for current mode\n"__drm_err("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"
)
3289 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n")__drm_err("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"
)
;
3290 }
3291
3292 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
3293 temp = RREG32(RADEON_MEM_TIMING_CNTL)r100_mm_rreg(rdev, (0x0144), 0);
3294 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
3295 mem_trcd = ((temp >> 2) & 0x3) + 1;
3296 mem_trp = ((temp & 0x3)) + 1;
3297 mem_tras = ((temp & 0x70) >> 4) + 1;
3298 } else if (rdev->family == CHIP_R300 ||
3299 rdev->family == CHIP_R350) { /* r300, r350 */
3300 mem_trcd = (temp & 0x7) + 1;
3301 mem_trp = ((temp >> 8) & 0x7) + 1;
3302 mem_tras = ((temp >> 11) & 0xf) + 4;
3303 } else if (rdev->family == CHIP_RV350 ||
3304 rdev->family == CHIP_RV380) {
3305 /* rv3x0 */
3306 mem_trcd = (temp & 0x7) + 3;
3307 mem_trp = ((temp >> 8) & 0x7) + 3;
3308 mem_tras = ((temp >> 11) & 0xf) + 6;
3309 } else if (rdev->family == CHIP_R420 ||
3310 rdev->family == CHIP_R423 ||
3311 rdev->family == CHIP_RV410) {
3312 /* r4xx */
3313 mem_trcd = (temp & 0xf) + 3;
3314 if (mem_trcd > 15)
3315 mem_trcd = 15;
3316 mem_trp = ((temp >> 8) & 0xf) + 3;
3317 if (mem_trp > 15)
3318 mem_trp = 15;
3319 mem_tras = ((temp >> 12) & 0x1f) + 6;
3320 if (mem_tras > 31)
3321 mem_tras = 31;
3322 } else { /* RV200, R200 */
3323 mem_trcd = (temp & 0x7) + 1;
3324 mem_trp = ((temp >> 8) & 0x7) + 1;
3325 mem_tras = ((temp >> 12) & 0xf) + 4;
3326 }
3327 /* convert to FF */
3328 trcd_ff.full = dfixed_const(mem_trcd)(u32)(((mem_trcd) << 12));
3329 trp_ff.full = dfixed_const(mem_trp)(u32)(((mem_trp) << 12));
3330 tras_ff.full = dfixed_const(mem_tras)(u32)(((mem_tras) << 12));
3331
 3332 /* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
3333 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG)r100_mm_rreg(rdev, (0x0158), 0);
3334 data = (temp & (7 << 20)) >> 20;
3335 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
3336 if (rdev->family == CHIP_RS480) /* don't think rs400 */
3337 tcas_ff = memtcas_rs480_ff[data];
3338 else
3339 tcas_ff = memtcas_ff[data];
3340 } else
3341 tcas_ff = memtcas2_ff[data];
3342
3343 if (rdev->family == CHIP_RS400 ||
3344 rdev->family == CHIP_RS480) {
 3345 /* extra CAS latency stored in bits 23-25: 0-4 clocks */
3346 data = (temp >> 23) & 0x7;
3347 if (data < 5)
3348 tcas_ff.full += dfixed_const(data)(u32)(((data) << 12));
3349 }
3350
3351 if (ASIC_IS_R300(rdev)((rdev->family == CHIP_R300) || (rdev->family == CHIP_RV350
) || (rdev->family == CHIP_R350) || (rdev->family == CHIP_RV380
) || (rdev->family == CHIP_R420) || (rdev->family == CHIP_R423
) || (rdev->family == CHIP_RV410) || (rdev->family == CHIP_RS400
) || (rdev->family == CHIP_RS480))
&& !(rdev->flags & RADEON_IS_IGP)) {
3352 /* on the R300, Tcas is included in Trbs.
3353 */
3354 temp = RREG32(RADEON_MEM_CNTL)r100_mm_rreg(rdev, (0x0140), 0);
3355 data = (R300_MEM_NUM_CHANNELS_MASK0x03 & temp);
3356 if (data == 1) {
3357 if (R300_MEM_USE_CD_CH_ONLY(1 << 2) & temp) {
3358 temp = RREG32(R300_MC_IND_INDEX)r100_mm_rreg(rdev, (0x01f8), 0);
3359 temp &= ~R300_MC_IND_ADDR_MASK0x3f;
3360 temp |= R300_MC_READ_CNTL_CD_mcind0x24;
3361 WREG32(R300_MC_IND_INDEX, temp)r100_mm_wreg(rdev, (0x01f8), (temp), 0);
3362 temp = RREG32(R300_MC_IND_DATA)r100_mm_rreg(rdev, (0x01fc), 0);
3363 data = (R300_MEM_RBS_POSITION_C_MASK0x03 & temp);
3364 } else {
3365 temp = RREG32(R300_MC_READ_CNTL_AB)r100_mm_rreg(rdev, (0x017c), 0);
3366 data = (R300_MEM_RBS_POSITION_A_MASK0x03 & temp);
3367 }
3368 } else {
3369 temp = RREG32(R300_MC_READ_CNTL_AB)r100_mm_rreg(rdev, (0x017c), 0);
3370 data = (R300_MEM_RBS_POSITION_A_MASK0x03 & temp);
3371 }
3372 if (rdev->family == CHIP_RV410 ||
3373 rdev->family == CHIP_R420 ||
3374 rdev->family == CHIP_R423)
3375 trbs_ff = memtrbs_r4xx[data];
3376 else
3377 trbs_ff = memtrbs[data];
3378 tcas_ff.full += trbs_ff.full;
3379 }
3380
3381 sclk_eff_ff.full = sclk_ff.full;
3382
3383 if (rdev->flags & RADEON_IS_AGP) {
3384 fixed20_12 agpmode_ff;
3385 agpmode_ff.full = dfixed_const(radeon_agpmode)(u32)(((radeon_agpmode) << 12));
3386 temp_ff.full = dfixed_const_666(16)(u32)(((16) << 12) + 2731);
3387 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff)((u64)((u64)(agpmode_ff).full * (temp_ff).full + 2048) >>
12)
;
3388 }
3389 /* TODO PCIE lanes may affect this - agpmode == 16?? */
3390
3391 if (ASIC_IS_R300(rdev)((rdev->family == CHIP_R300) || (rdev->family == CHIP_RV350
) || (rdev->family == CHIP_R350) || (rdev->family == CHIP_RV380
) || (rdev->family == CHIP_R420) || (rdev->family == CHIP_R423
) || (rdev->family == CHIP_RV410) || (rdev->family == CHIP_RS400
) || (rdev->family == CHIP_RS480))
) {
3392 sclk_delay_ff.full = dfixed_const(250)(u32)(((250) << 12));
3393 } else {
3394 if ((rdev->family == CHIP_RV100) ||
3395 rdev->flags & RADEON_IS_IGP) {
3396 if (rdev->mc.vram_is_ddr)
3397 sclk_delay_ff.full = dfixed_const(41)(u32)(((41) << 12));
3398 else
3399 sclk_delay_ff.full = dfixed_const(33)(u32)(((33) << 12));
3400 } else {
3401 if (rdev->mc.vram_width == 128)
3402 sclk_delay_ff.full = dfixed_const(57)(u32)(((57) << 12));
3403 else
3404 sclk_delay_ff.full = dfixed_const(41)(u32)(((41) << 12));
3405 }
3406 }
3407
3408 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
3409
3410 if (rdev->mc.vram_is_ddr) {
3411 if (rdev->mc.vram_width == 32) {
3412 k1.full = dfixed_const(40)(u32)(((40) << 12));
3413 c = 3;
3414 } else {
3415 k1.full = dfixed_const(20)(u32)(((20) << 12));
3416 c = 1;
3417 }
3418 } else {
3419 k1.full = dfixed_const(40)(u32)(((40) << 12));
3420 c = 3;
3421 }
3422
3423 temp_ff.full = dfixed_const(2)(u32)(((2) << 12));
3424 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff)((u64)((u64)(trcd_ff).full * (temp_ff).full + 2048) >> 12
)
;
3425 temp_ff.full = dfixed_const(c)(u32)(((c) << 12));
3426 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff)((u64)((u64)(tcas_ff).full * (temp_ff).full + 2048) >> 12
)
;
3427 temp_ff.full = dfixed_const(4)(u32)(((4) << 12));
3428 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff)((u64)((u64)(tras_ff).full * (temp_ff).full + 2048) >> 12
)
;
3429 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff)((u64)((u64)(trp_ff).full * (temp_ff).full + 2048) >> 12
)
;
3430 mc_latency_mclk.full += k1.full;
3431
3432 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
3433 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
3434
3435 /*
3436 HW cursor time assuming worst case of full size colour cursor.
3437 */
3438 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))))(u32)((((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))) <<
12))
;
3439 temp_ff.full += trcd_ff.full;
3440 if (temp_ff.full < tras_ff.full)
3441 temp_ff.full = tras_ff.full;
3442 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
3443
3444 temp_ff.full = dfixed_const(cur_size)(u32)(((cur_size) << 12));
3445 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
3446 /*
3447 Find the total latency for the display data.
3448 */
3449 disp_latency_overhead.full = dfixed_const(8)(u32)(((8) << 12));
3450 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
3451 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
3452 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
3453
3454 if (mc_latency_mclk.full > mc_latency_sclk.full)
3455 disp_latency.full = mc_latency_mclk.full;
3456 else
3457 disp_latency.full = mc_latency_sclk.full;
3458
3459 /* setup Max GRPH_STOP_REQ default value */
3460 if (ASIC_IS_RV100(rdev)((rdev->family == CHIP_RV100) || (rdev->family == CHIP_RV200
) || (rdev->family == CHIP_RS100) || (rdev->family == CHIP_RS200
) || (rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280
) || (rdev->family == CHIP_RS300))
)
3461 max_stop_req = 0x5c;
3462 else
3463 max_stop_req = 0x7c;
3464
3465 if (mode1) {
3466 /* CRTC1
3467 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
3468 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
3469 */
3470 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
3471
3472 if (stop_req > max_stop_req)
3473 stop_req = max_stop_req;
3474
3475 /*
3476 Find the drain rate of the display buffer.
3477 */
3478 temp_ff.full = dfixed_const((16/pixel_bytes1))(u32)((((16/pixel_bytes1)) << 12));
3479 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
3480
3481 /*
3482 Find the critical point of the display buffer.
3483 */
3484 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency)((u64)((u64)(disp_drain_rate).full * (disp_latency).full + 2048
) >> 12)
;
3485 crit_point_ff.full += dfixed_const_half(0)(u32)(((0) << 12) + 2048);
3486
3487 critical_point = dfixed_trunc(crit_point_ff)((crit_point_ff).full >> 12);
3488
3489 if (rdev->disp_priority == 2) {
3490 critical_point = 0;
3491 }
3492
3493 /*
3494 The critical point should never be above max_stop_req-4. Setting
3495 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
3496 */
3497 if (max_stop_req - critical_point < 4)
3498 critical_point = 0;
3499
3500 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
 3501 /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */
3502 critical_point = 0x10;
3503 }
3504
3505 temp = RREG32(RADEON_GRPH_BUFFER_CNTL)r100_mm_rreg(rdev, (0x02f0), 0);
3506 temp &= ~(RADEON_GRPH_STOP_REQ_MASK(0x7f<<8));
3507 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT8);
3508 temp &= ~(RADEON_GRPH_START_REQ_MASK(0x7f));
3509 if ((rdev->family == CHIP_R350) &&
3510 (stop_req > 0x15)) {
3511 stop_req -= 0x10;
3512 }
3513 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT0);
3514 temp |= RADEON_GRPH_BUFFER_SIZE(1<<29);
3515 temp &= ~(RADEON_GRPH_CRITICAL_CNTL(1<<28) |
3516 RADEON_GRPH_CRITICAL_AT_SOF(1<<30) |
3517 RADEON_GRPH_STOP_CNTL(1<<31));
3518 /*
3519 Write the result into the register.
3520 */
3521 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |r100_mm_wreg(rdev, (0x02f0), (((temp & ~(0x7f<<16))
| (critical_point << 16))), 0)
3522 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)))r100_mm_wreg(rdev, (0x02f0), (((temp & ~(0x7f<<16))
| (critical_point << 16))), 0)
;
3523
3524#if 0
3525 if ((rdev->family == CHIP_RS400) ||
3526 (rdev->family == CHIP_RS480)) {
3527 /* attempt to program RS400 disp regs correctly ??? */
3528 temp = RREG32(RS400_DISP1_REG_CNTL)r100_mm_rreg(rdev, (RS400_DISP1_REG_CNTL), 0);
3529 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK0x3ff |
3530 RS400_DISP1_STOP_REQ_LEVEL_MASK0x3ff);
3531 WREG32(RS400_DISP1_REQ_CNTL1, (temp |r100_mm_wreg(rdev, (0xe3c), ((temp | (critical_point <<
0) | (critical_point << 12))), 0)
3532 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |r100_mm_wreg(rdev, (0xe3c), ((temp | (critical_point <<
0) | (critical_point << 12))), 0)
3533 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)))r100_mm_wreg(rdev, (0xe3c), ((temp | (critical_point <<
0) | (critical_point << 12))), 0)
;
3534 temp = RREG32(RS400_DMIF_MEM_CNTL1)r100_mm_rreg(rdev, (0xe38), 0);
3535 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK0x3ff |
3536 RS400_DISP1_CRITICAL_POINT_STOP_MASK0x3ff);
3537 WREG32(RS400_DMIF_MEM_CNTL1, (temp |r100_mm_wreg(rdev, (0xe38), ((temp | (critical_point <<
12) | (critical_point << 22))), 0)
3538 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |r100_mm_wreg(rdev, (0xe38), ((temp | (critical_point <<
12) | (critical_point << 22))), 0)
3539 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)))r100_mm_wreg(rdev, (0xe38), ((temp | (critical_point <<
12) | (critical_point << 22))), 0)
;
3540 }
3541#endif
3542
3543 DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",__drm_dbg(DRM_UT_KMS, "GRPH_BUFFER_CNTL from to %x\n", (unsigned
int)r100_mm_rreg(rdev, (0x02f0), 0))
3544 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */__drm_dbg(DRM_UT_KMS, "GRPH_BUFFER_CNTL from to %x\n", (unsigned
int)r100_mm_rreg(rdev, (0x02f0), 0))
3545 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL))__drm_dbg(DRM_UT_KMS, "GRPH_BUFFER_CNTL from to %x\n", (unsigned
int)r100_mm_rreg(rdev, (0x02f0), 0))
;
3546 }
3547
3548 if (mode2) {
3549 u32 grph2_cntl;
3550 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3551
3552 if (stop_req > max_stop_req)
3553 stop_req = max_stop_req;
3554
3555 /*
3556 Find the drain rate of the display buffer.
3557 */
3558 temp_ff.full = dfixed_const((16/pixel_bytes2))(u32)((((16/pixel_bytes2)) << 12));
3559 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3560
3561 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL)r100_mm_rreg(rdev, (0x03f0), 0);
3562 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK(0x7f<<8));
3563 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT8);
3564 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK(0x7f));
3565 if ((rdev->family == CHIP_R350) &&
3566 (stop_req > 0x15)) {
3567 stop_req -= 0x10;
3568 }
3569 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT0);
3570 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE(1<<29);
3571 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL(1<<28) |
3572 RADEON_GRPH_CRITICAL_AT_SOF(1<<30) |
3573 RADEON_GRPH_STOP_CNTL(1<<31));
3574
3575 if ((rdev->family == CHIP_RS100) ||
3576 (rdev->family == CHIP_RS200))
3577 critical_point2 = 0;
3578 else {
3579 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3580 temp_ff.full = dfixed_const(temp)(u32)(((temp) << 12));
3581 temp_ff.full = dfixed_mul(mclk_ff, temp_ff)((u64)((u64)(mclk_ff).full * (temp_ff).full + 2048) >> 12
)
;
3582 if (sclk_ff.full < temp_ff.full)
3583 temp_ff.full = sclk_ff.full;
3584
3585 read_return_rate.full = temp_ff.full;
3586
3587 if (mode1) {
3588 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3589 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3590 } else {
3591 time_disp1_drop_priority.full = 0;
3592 }
3593 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3594 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2)((u64)((u64)(crit_point_ff).full * (disp_drain_rate2).full + 2048
) >> 12)
;
3595 crit_point_ff.full += dfixed_const_half(0)(u32)(((0) << 12) + 2048);
3596
3597 critical_point2 = dfixed_trunc(crit_point_ff)((crit_point_ff).full >> 12);
3598
3599 if (rdev->disp_priority == 2) {
3600 critical_point2 = 0;
3601 }
3602
3603 if (max_stop_req - critical_point2 < 4)
3604 critical_point2 = 0;
3605
3606 }
3607
3608 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
 3609 /* some R300 cards have a problem with this set to 0 */
3610 critical_point2 = 0x10;
3611 }
3612
3613 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |r100_mm_wreg(rdev, (0x03f0), (((grph2_cntl & ~(0x7f<<
16)) | (critical_point2 << 16))), 0)
3614 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)))r100_mm_wreg(rdev, (0x03f0), (((grph2_cntl & ~(0x7f<<
16)) | (critical_point2 << 16))), 0)
;
3615
3616 if ((rdev->family == CHIP_RS400) ||
3617 (rdev->family == CHIP_RS480)) {
3618#if 0
3619 /* attempt to program RS400 disp2 regs correctly ??? */
3620 temp = RREG32(RS400_DISP2_REQ_CNTL1)r100_mm_rreg(rdev, (0xe30), 0);
3621 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK0x3ff |
3622 RS400_DISP2_STOP_REQ_LEVEL_MASK0x3ff);
3623 WREG32(RS400_DISP2_REQ_CNTL1, (temp |r100_mm_wreg(rdev, (0xe30), ((temp | (critical_point2 <<
0) | (critical_point2 << 12))), 0)
3624 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |r100_mm_wreg(rdev, (0xe30), ((temp | (critical_point2 <<
0) | (critical_point2 << 12))), 0)
3625 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)))r100_mm_wreg(rdev, (0xe30), ((temp | (critical_point2 <<
0) | (critical_point2 << 12))), 0)
;
3626 temp = RREG32(RS400_DISP2_REQ_CNTL2)r100_mm_rreg(rdev, (0xe34), 0);
3627 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK0x3ff |
3628 RS400_DISP2_CRITICAL_POINT_STOP_MASK0x3ff);
3629 WREG32(RS400_DISP2_REQ_CNTL2, (temp |r100_mm_wreg(rdev, (0xe34), ((temp | (critical_point2 <<
12) | (critical_point2 << 22))), 0)
3630 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |r100_mm_wreg(rdev, (0xe34), ((temp | (critical_point2 <<
12) | (critical_point2 << 22))), 0)
3631 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)))r100_mm_wreg(rdev, (0xe34), ((temp | (critical_point2 <<
12) | (critical_point2 << 22))), 0)
;
3632#endif
3633 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC)r100_mm_wreg(rdev, (0xe30), (0x105DC1CC), 0);
3634 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000)r100_mm_wreg(rdev, (0xe34), (0x2749D000), 0);
3635 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC)r100_mm_wreg(rdev, (0xe38), (0x29CA71DC), 0);
3636 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC)r100_mm_wreg(rdev, (0xe3c), (0x28FBC3AC), 0);
3637 }
3638
3639 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",__drm_dbg(DRM_UT_KMS, "GRPH2_BUFFER_CNTL from to %x\n", (unsigned
int)r100_mm_rreg(rdev, (0x03f0), 0))
3640 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL))__drm_dbg(DRM_UT_KMS, "GRPH2_BUFFER_CNTL from to %x\n", (unsigned
int)r100_mm_rreg(rdev, (0x03f0), 0))
;
3641 }
3642
3643 /* Save number of lines the linebuffer leads before the scanout */
3644 if (mode1)
3645 rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay)(((lb_size) + ((mode1->crtc_hdisplay) - 1)) / (mode1->crtc_hdisplay
))
;
3646
3647 if (mode2)
3648 rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay)(((lb_size) + ((mode2->crtc_hdisplay) - 1)) / (mode2->crtc_hdisplay
))
;
3649}
3650
3651int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3652{
3653 uint32_t scratch;
3654 uint32_t tmp = 0;
3655 unsigned i;
3656 int r;
3657
3658 r = radeon_scratch_get(rdev, &scratch);
3659 if (r) {
3660 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r)__drm_err("radeon: cp failed to get scratch reg (%d).\n", r);
3661 return r;
3662 }
3663 WREG32(scratch, 0xCAFEDEAD)r100_mm_wreg(rdev, (scratch), (0xCAFEDEAD), 0);
3664 r = radeon_ring_lock(rdev, ring, 2);
3665 if (r) {
3666 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r)__drm_err("radeon: cp failed to lock ring (%d).\n", r);
3667 radeon_scratch_free(rdev, scratch);
3668 return r;
3669 }
3670 radeon_ring_write(ring, PACKET0(scratch, 0)(0x00000000 | ((((scratch) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
);
3671 radeon_ring_write(ring, 0xDEADBEEF);
3672 radeon_ring_unlock_commit(rdev, ring, false0);
3673 for (i = 0; i < rdev->usec_timeout; i++) {
3674 tmp = RREG32(scratch)r100_mm_rreg(rdev, (scratch), 0);
3675 if (tmp == 0xDEADBEEF) {
3676 break;
3677 }
3678 udelay(1);
3679 }
3680 if (i < rdev->usec_timeout) {
3681 DRM_INFO("ring test succeeded in %d usecs\n", i)printk("\0016" "[" "drm" "] " "ring test succeeded in %d usecs\n"
, i)
;
3682 } else {
3683 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",__drm_err("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n"
, scratch, tmp)
3684 scratch, tmp)__drm_err("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n"
, scratch, tmp)
;
3685 r = -EINVAL22;
3686 }
3687 radeon_scratch_free(rdev, scratch);
3688 return r;
3689}
3690
3691void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3692{
3693 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX0];
3694
3695 if (ring->rptr_save_reg) {
3696 u32 next_rptr = ring->wptr + 2 + 3;
3697 radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0)(0x00000000 | ((((ring->rptr_save_reg) >> 2) <<
0) & (0x1ffff << 0)) | ((((0)) << 16) & (
0x3fff << 16)))
);
3698 radeon_ring_write(ring, next_rptr);
3699 }
3700
3701 radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1)(0x00000000 | ((((0x0738) >> 2) << 0) & (0x1ffff
<< 0)) | ((((1)) << 16) & (0x3fff << 16
)))
);
3702 radeon_ring_write(ring, ib->gpu_addr);
3703 radeon_ring_write(ring, ib->length_dw);
3704}
3705
3706int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3707{
3708 struct radeon_ib ib;
3709 uint32_t scratch;
3710 uint32_t tmp = 0;
3711 unsigned i;
3712 int r;
3713
3714 r = radeon_scratch_get(rdev, &scratch);
3715 if (r) {
3716 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r)__drm_err("radeon: failed to get scratch reg (%d).\n", r);
3717 return r;
3718 }
3719 WREG32(scratch, 0xCAFEDEAD)r100_mm_wreg(rdev, (scratch), (0xCAFEDEAD), 0);
3720 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX0, &ib, NULL((void *)0), 256);
3721 if (r) {
3722 DRM_ERROR("radeon: failed to get ib (%d).\n", r)__drm_err("radeon: failed to get ib (%d).\n", r);
3723 goto free_scratch;
3724 }
3725 ib.ptr[0] = PACKET0(scratch, 0)(0x00000000 | ((((scratch) >> 2) << 0) & (0x1ffff
<< 0)) | ((((0)) << 16) & (0x3fff << 16
)))
;
3726 ib.ptr[1] = 0xDEADBEEF;
3727 ib.ptr[2] = PACKET2(0)(0x80000000 | ((((0)) << 0) & (0x3fffffff << 0
)))
;
3728 ib.ptr[3] = PACKET2(0)(0x80000000 | ((((0)) << 0) & (0x3fffffff << 0
)))
;
3729 ib.ptr[4] = PACKET2(0)(0x80000000 | ((((0)) << 0) & (0x3fffffff << 0
)))
;
3730 ib.ptr[5] = PACKET2(0)(0x80000000 | ((((0)) << 0) & (0x3fffffff << 0
)))
;
3731 ib.ptr[6] = PACKET2(0)(0x80000000 | ((((0)) << 0) & (0x3fffffff << 0
)))
;
3732 ib.ptr[7] = PACKET2(0)(0x80000000 | ((((0)) << 0) & (0x3fffffff << 0
)))
;
3733 ib.length_dw = 8;
3734 r = radeon_ib_schedule(rdev, &ib, NULL((void *)0), false0);
3735 if (r) {
3736 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r)__drm_err("radeon: failed to schedule ib (%d).\n", r);
3737 goto free_ib;
3738 }
3739 r = radeon_fence_wait_timeout(ib.fence, false0, usecs_to_jiffies((((uint64_t)(1000000)) * hz / 1000000)
3740 RADEON_USEC_IB_TEST_TIMEOUT)(((uint64_t)(1000000)) * hz / 1000000));
3741 if (r < 0) {
3742 DRM_ERROR("radeon: fence wait failed (%d).\n", r)__drm_err("radeon: fence wait failed (%d).\n", r);
3743 goto free_ib;
3744 } else if (r == 0) {
3745 DRM_ERROR("radeon: fence wait timed out.\n")__drm_err("radeon: fence wait timed out.\n");
3746 r = -ETIMEDOUT60;
3747 goto free_ib;
3748 }
3749 r = 0;
3750 for (i = 0; i < rdev->usec_timeout; i++) {
3751 tmp = RREG32(scratch)r100_mm_rreg(rdev, (scratch), 0);
3752 if (tmp == 0xDEADBEEF) {
3753 break;
3754 }
3755 udelay(1);
3756 }
3757 if (i < rdev->usec_timeout) {
3758 DRM_INFO("ib test succeeded in %u usecs\n", i)printk("\0016" "[" "drm" "] " "ib test succeeded in %u usecs\n"
, i)
;
3759 } else {
3760 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",__drm_err("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n"
, scratch, tmp)
3761 scratch, tmp)__drm_err("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n"
, scratch, tmp)
;
3762 r = -EINVAL22;
3763 }
3764free_ib:
3765 radeon_ib_free(rdev, &ib);
3766free_scratch:
3767 radeon_scratch_free(rdev, scratch);
3768 return r;
3769}
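The fence wait above relies on radeon_fence_wait_timeout()'s three-way return convention, restated here for clarity:

/* radeon_fence_wait_timeout() as used in r100_ib_test:
 *   r < 0   hard error, propagated to the caller;
 *   r == 0  the wait expired, mapped to -ETIMEDOUT;
 *   r > 0   the fence signaled in time, treated as success (r reset to 0). */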
3770
3771void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3772{
3773	/* Shutdown CP, we shouldn't need to do that but better be safe than
3774 * sorry
3775 */
3776 rdev->ring[RADEON_RING_TYPE_GFX_INDEX0].ready = false0;
3777 WREG32(R_000740_CP_CSQ_CNTL, 0)r100_mm_wreg(rdev, (0x000740), (0), 0);
153
Calling 'r100_mm_wreg'
157
Returning from 'r100_mm_wreg'
3778
3779	/* Save a few CRTC registers */
3780 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT)ioread8((rdev->rmmio) + (0x0003C2));
3781 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL)r100_mm_rreg(rdev, (0x000054), 0);
158
Calling 'r100_mm_rreg'
162
Returning from 'r100_mm_rreg'
3782 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL)r100_mm_rreg(rdev, (0x000050), 0);
163
Calling 'r100_mm_rreg'
166
Returning from 'r100_mm_rreg'
3783 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET)r100_mm_rreg(rdev, (0x000260), 0);
167
Calling 'r100_mm_rreg'
171
Returning from 'r100_mm_rreg'
3784 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
172
Assuming the condition is false
173
Taking false branch
3785 save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL)r100_mm_rreg(rdev, (0x0003F8), 0);
3786 save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET)r100_mm_rreg(rdev, (0x000360), 0);
3787 }
3788
3789 /* Disable VGA aperture access */
3790 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT)iowrite8(0xFD & save->GENMO_WT, (rdev->rmmio) + (0x0003C2
))
;
3791 /* Disable cursor, overlay, crtc */
3792 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1))r100_mm_wreg(rdev, (0x000260), (save->CUR_OFFSET | (((1) &
0x1) << 31)), 0)
;
174
Calling 'r100_mm_wreg'
177
Returning from 'r100_mm_wreg'
3793 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |r100_mm_wreg(rdev, (0x000054), (save->CRTC_EXT_CNTL | (((1
) & 0x1) << 10)), 0)
178
Calling 'r100_mm_wreg'
181
Returning from 'r100_mm_wreg'
3794 S_000054_CRTC_DISPLAY_DIS(1))r100_mm_wreg(rdev, (0x000054), (save->CRTC_EXT_CNTL | (((1
) & 0x1) << 10)), 0)
;
3795 WREG32(R_000050_CRTC_GEN_CNTL,r100_mm_wreg(rdev, (0x000050), ((0xFFFEFFFF & save->CRTC_GEN_CNTL
) | (((1) & 0x1) << 26)), 0)
182
Calling 'r100_mm_wreg'
185
Returning from 'r100_mm_wreg'
3796 (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |r100_mm_wreg(rdev, (0x000050), ((0xFFFEFFFF & save->CRTC_GEN_CNTL
) | (((1) & 0x1) << 26)), 0)
3797 S_000050_CRTC_DISP_REQ_EN_B(1))r100_mm_wreg(rdev, (0x000050), ((0xFFFEFFFF & save->CRTC_GEN_CNTL
) | (((1) & 0x1) << 26)), 0)
;
3798 WREG32(R_000420_OV0_SCALE_CNTL,r100_mm_wreg(rdev, (0x000420), (0xBFFFFFFF & r100_mm_rreg
(rdev, (0x000420), 0)), 0)
186
Calling 'r100_mm_rreg'
190
Returning from 'r100_mm_rreg'
191
Calling 'r100_mm_wreg'
194
Returning from 'r100_mm_wreg'
3799 C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL))r100_mm_wreg(rdev, (0x000420), (0xBFFFFFFF & r100_mm_rreg
(rdev, (0x000420), 0)), 0)
;
3800 WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET)r100_mm_wreg(rdev, (0x000260), (0x7FFFFFFF & save->CUR_OFFSET
), 0)
;
195
Calling 'r100_mm_wreg'
198
Returning from 'r100_mm_wreg'
3801 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
199
Taking false branch
3802 WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |r100_mm_wreg(rdev, (0x000360), (save->CUR2_OFFSET | (((1) &
0x1) << 31)), 0)
3803 S_000360_CUR2_LOCK(1))r100_mm_wreg(rdev, (0x000360), (save->CUR2_OFFSET | (((1) &
0x1) << 31)), 0)
;
3804 WREG32(R_0003F8_CRTC2_GEN_CNTL,r100_mm_wreg(rdev, (0x0003F8), ((0xFFFEFFFF & save->CRTC2_GEN_CNTL
) | (((1) & 0x1) << 23) | (((1) & 0x1) <<
26)), 0)
3805 (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |r100_mm_wreg(rdev, (0x0003F8), ((0xFFFEFFFF & save->CRTC2_GEN_CNTL
) | (((1) & 0x1) << 23) | (((1) & 0x1) <<
26)), 0)
3806 S_0003F8_CRTC2_DISPLAY_DIS(1) |r100_mm_wreg(rdev, (0x0003F8), ((0xFFFEFFFF & save->CRTC2_GEN_CNTL
) | (((1) & 0x1) << 23) | (((1) & 0x1) <<
26)), 0)
3807 S_0003F8_CRTC2_DISP_REQ_EN_B(1))r100_mm_wreg(rdev, (0x0003F8), ((0xFFFEFFFF & save->CRTC2_GEN_CNTL
) | (((1) & 0x1) << 23) | (((1) & 0x1) <<
26)), 0)
;
3808 WREG32(R_000360_CUR2_OFFSET,r100_mm_wreg(rdev, (0x000360), (0x7FFFFFFF & save->CUR2_OFFSET
), 0)
3809 C_000360_CUR2_LOCK & save->CUR2_OFFSET)r100_mm_wreg(rdev, (0x000360), (0x7FFFFFFF & save->CUR2_OFFSET
), 0)
;
3810 }
3811}
200
Returning without writing to 'rdev->me_fw'
3812
3813void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3814{
3815 /* Update base address for crtc */
3816 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start)r100_mm_wreg(rdev, (0x00023C), (rdev->mc.vram_start), 0);
218
Calling 'r100_mm_wreg'
222
Returning from 'r100_mm_wreg'
3817 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
223
Assuming the condition is false
224
Taking false branch
3818 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start)r100_mm_wreg(rdev, (0x00033C), (rdev->mc.vram_start), 0);
3819 }
3820 /* Restore CRTC registers */
3821 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT)iowrite8(save->GENMO_WT, (rdev->rmmio) + (0x0003C2));
3822 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL)r100_mm_wreg(rdev, (0x000054), (save->CRTC_EXT_CNTL), 0);
225
Calling 'r100_mm_wreg'
228
Returning from 'r100_mm_wreg'
3823 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL)r100_mm_wreg(rdev, (0x000050), (save->CRTC_GEN_CNTL), 0);
229
Calling 'r100_mm_wreg'
232
Returning from 'r100_mm_wreg'
3824 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
233
Taking false branch
3825 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL)r100_mm_wreg(rdev, (0x0003F8), (save->CRTC2_GEN_CNTL), 0);
3826 }
3827}
234
Returning without writing to 'rdev->me_fw'
3828
3829void r100_vga_render_disable(struct radeon_device *rdev)
3830{
3831 u32 tmp;
3832
3833 tmp = RREG8(R_0003C2_GENMO_WT)ioread8((rdev->rmmio) + (0x0003C2));
3834 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp)iowrite8(0xFD & tmp, (rdev->rmmio) + (0x0003C2));
3835}
9
Returning without writing to 'rdev->me_fw'
3836
3837static void r100_debugfs(struct radeon_device *rdev)
3838{
3839 int r;
3840
3841 r = r100_debugfs_mc_info_init(rdev);
2
Calling 'r100_debugfs_mc_info_init'
4
Returning from 'r100_debugfs_mc_info_init'
3842	if (r
4.1
'r' is 0
)
5
Taking false branch
3843 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n")printf("drm:pid%d:%s *WARNING* " "Failed to create r100_mc debugfs file.\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
3844}
6
Returning without writing to 'rdev->me_fw'
3845
3846static void r100_mc_program(struct radeon_device *rdev)
3847{
3848 struct r100_mc_save save;
3849
3850 /* Stops all mc clients */
3851 r100_mc_stop(rdev, &save);
152
Calling 'r100_mc_stop'
201
Returning from 'r100_mc_stop'
3852 if (rdev->flags & RADEON_IS_AGP) {
202
Assuming the condition is false
203
Taking false branch
3853 WREG32(R_00014C_MC_AGP_LOCATION,r100_mm_wreg(rdev, (0x00014C), ((((rdev->mc.gtt_start >>
16) & 0xFFFF) << 0) | (((rdev->mc.gtt_end >>
16) & 0xFFFF) << 16)), 0)
3854 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |r100_mm_wreg(rdev, (0x00014C), ((((rdev->mc.gtt_start >>
16) & 0xFFFF) << 0) | (((rdev->mc.gtt_end >>
16) & 0xFFFF) << 16)), 0)
3855 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16))r100_mm_wreg(rdev, (0x00014C), ((((rdev->mc.gtt_start >>
16) & 0xFFFF) << 0) | (((rdev->mc.gtt_end >>
16) & 0xFFFF) << 16)), 0)
;
3856 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base))r100_mm_wreg(rdev, (0x000170), (((u32)(rdev->mc.agp_base))
), 0)
;
3857 if (rdev->family > CHIP_RV200)
3858 WREG32(R_00015C_AGP_BASE_2,r100_mm_wreg(rdev, (0x00015C), (((u32)(((rdev->mc.agp_base
) >> 16) >> 16)) & 0xff), 0)
3859 upper_32_bits(rdev->mc.agp_base) & 0xff)r100_mm_wreg(rdev, (0x00015C), (((u32)(((rdev->mc.agp_base
) >> 16) >> 16)) & 0xff), 0)
;
3860 } else {
3861 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF)r100_mm_wreg(rdev, (0x00014C), (0x0FFFFFFF), 0);
204
Calling 'r100_mm_wreg'
207
Returning from 'r100_mm_wreg'
3862 WREG32(R_000170_AGP_BASE, 0)r100_mm_wreg(rdev, (0x000170), (0), 0);
3863 if (rdev->family > CHIP_RV200)
208
Assuming field 'family' is <= CHIP_RV200
209
Taking false branch
3864 WREG32(R_00015C_AGP_BASE_2, 0)r100_mm_wreg(rdev, (0x00015C), (0), 0);
3865 }
3866 /* Wait for mc idle */
3867 if (r100_mc_wait_for_idle(rdev))
210
Assuming the condition is false
211
Taking false branch
3868 dev_warn(rdev->dev, "Wait for MC idle timeout.\n")printf("drm:pid%d:%s *WARNING* " "Wait for MC idle timeout.\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
3869 /* Program MC, should be a 32bits limited address space */
3870 WREG32(R_000148_MC_FB_LOCATION,r100_mm_wreg(rdev, (0x000148), ((((rdev->mc.vram_start >>
16) & 0xFFFF) << 0) | (((rdev->mc.vram_end >>
16) & 0xFFFF) << 16)), 0)
212
Calling 'r100_mm_wreg'
216
Returning from 'r100_mm_wreg'
3871 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |r100_mm_wreg(rdev, (0x000148), ((((rdev->mc.vram_start >>
16) & 0xFFFF) << 0) | (((rdev->mc.vram_end >>
16) & 0xFFFF) << 16)), 0)
3872 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16))r100_mm_wreg(rdev, (0x000148), ((((rdev->mc.vram_start >>
16) & 0xFFFF) << 0) | (((rdev->mc.vram_end >>
16) & 0xFFFF) << 16)), 0)
;
3873 r100_mc_resume(rdev, &save);
217
Calling 'r100_mc_resume'
235
Returning from 'r100_mc_resume'
3874}
3875
3876static void r100_clock_startup(struct radeon_device *rdev)
3877{
3878 u32 tmp;
3879
3880 if (radeon_dynclks != -1 && radeon_dynclks)
3881 radeon_legacy_set_clock_gating(rdev, 1);
3882 /* We need to force on some of the block */
3883 tmp = RREG32_PLL(R_00000D_SCLK_CNTL)rdev->pll_rreg(rdev, (0x00000D));
3884 tmp |= S_00000D_FORCE_CP(1)(((1) & 0x1) << 16) | S_00000D_FORCE_VIP(1)(((1) & 0x1) << 23);
3885 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3886 tmp |= S_00000D_FORCE_DISP1(1)(((1) & 0x1) << 18) | S_00000D_FORCE_DISP2(1)(((1) & 0x1) << 15);
3887 WREG32_PLL(R_00000D_SCLK_CNTL, tmp)rdev->pll_wreg(rdev, (0x00000D), (tmp));
3888}
3889
3890static int r100_startup(struct radeon_device *rdev)
3891{
3892 int r;
3893
3894 /* set common regs */
3895 r100_set_common_regs(rdev);
120
Calling 'r100_set_common_regs'
150
Returning from 'r100_set_common_regs'
3896 /* program mc */
3897 r100_mc_program(rdev);
151
Calling 'r100_mc_program'
236
Returning from 'r100_mc_program'
3898 /* Resume clock */
3899 r100_clock_startup(rdev);
3900 /* Initialize GART (initialize after TTM so we can allocate
3901 * memory through TTM but finalize after TTM) */
3902 r100_enable_bm(rdev);
237
Calling 'r100_enable_bm'
248
Returning from 'r100_enable_bm'
3903 if (rdev->flags & RADEON_IS_PCI) {
249
Assuming the condition is false
250
Taking false branch
3904 r = r100_pci_gart_enable(rdev);
3905 if (r)
3906 return r;
3907 }
3908
3909 /* allocate wb buffer */
3910 r = radeon_wb_init(rdev);
3911 if (r)
251
Assuming 'r' is 0
252
Taking false branch
3912 return r;
3913
3914 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX0);
3915 if (r) {
253
Assuming 'r' is 0
254
Taking false branch
3916 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing CP fences (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
3917 return r;
3918 }
3919
3920 /* Enable IRQ */
3921 if (!rdev->irq.installed) {
255
Assuming field 'installed' is false
256
Taking true branch
3922 r = radeon_irq_kms_init(rdev);
3923 if (r)
257
Assuming 'r' is 0
258
Taking false branch
3924 return r;
3925 }
3926
3927 r100_irq_set(rdev);
259
Calling 'r100_irq_set'
269
Returning from 'r100_irq_set'
3928 rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL)r100_mm_rreg(rdev, (0x0130), 0);
270
Calling 'r100_mm_rreg'
273
Returning from 'r100_mm_rreg'
3929 /* 1M ring buffer */
3930 r = r100_cp_init(rdev, 1024 * 1024);
274
Calling 'r100_cp_init'
3931 if (r) {
3932 dev_err(rdev->dev, "failed initializing CP (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "failed initializing CP (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
3933 return r;
3934 }
3935
3936 r = radeon_ib_pool_init(rdev);
3937 if (r) {
3938 dev_err(rdev->dev, "IB initialization failed (%d).\n", r)printf("drm:pid%d:%s *ERROR* " "IB initialization failed (%d).\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r)
;
3939 return r;
3940 }
3941
3942 return 0;
3943}
3944
3945int r100_resume(struct radeon_device *rdev)
3946{
3947 int r;
3948
3949	/* Make sure GART is not working */
3950 if (rdev->flags & RADEON_IS_PCI)
3951 r100_pci_gart_disable(rdev);
3952 /* Resume clock before doing reset */
3953 r100_clock_startup(rdev);
3954 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3955 if (radeon_asic_reset(rdev)(rdev)->asic->asic_reset((rdev), 0)) {
3956 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",printf("drm:pid%d:%s *WARNING* " "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r100_mm_rreg
(rdev, (0x000E40), 0), r100_mm_rreg(rdev, (0x0007C0), 0))
3957 RREG32(R_000E40_RBBM_STATUS),printf("drm:pid%d:%s *WARNING* " "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r100_mm_rreg
(rdev, (0x000E40), 0), r100_mm_rreg(rdev, (0x0007C0), 0))
3958 RREG32(R_0007C0_CP_STAT))printf("drm:pid%d:%s *WARNING* " "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r100_mm_rreg
(rdev, (0x000E40), 0), r100_mm_rreg(rdev, (0x0007C0), 0))
;
3959 }
3960 /* post */
3961 radeon_combios_asic_init(rdev->ddev);
3962 /* Resume clock after posting */
3963 r100_clock_startup(rdev);
3964 /* Initialize surface registers */
3965 radeon_surface_init(rdev);
3966
3967 rdev->accel_working = true1;
3968 r = r100_startup(rdev);
3969 if (r) {
3970 rdev->accel_working = false0;
3971 }
3972 return r;
3973}
3974
3975int r100_suspend(struct radeon_device *rdev)
3976{
3977 radeon_pm_suspend(rdev);
3978 r100_cp_disable(rdev);
3979 radeon_wb_disable(rdev);
3980 r100_irq_disable(rdev);
3981 if (rdev->flags & RADEON_IS_PCI)
3982 r100_pci_gart_disable(rdev);
3983 return 0;
3984}
3985
3986void r100_fini(struct radeon_device *rdev)
3987{
3988 radeon_pm_fini(rdev);
3989 r100_cp_fini(rdev);
3990 radeon_wb_fini(rdev);
3991 radeon_ib_pool_fini(rdev);
3992 radeon_gem_fini(rdev);
3993 if (rdev->flags & RADEON_IS_PCI)
3994 r100_pci_gart_fini(rdev);
3995 radeon_agp_fini(rdev);
3996 radeon_irq_kms_fini(rdev);
3997 radeon_fence_driver_fini(rdev);
3998 radeon_bo_fini(rdev);
3999 radeon_atombios_fini(rdev);
4000 kfree(rdev->bios);
4001 rdev->bios = NULL((void *)0);
4002}
4003
4004/*
4005 * Due to how kexec works, it can leave the hw fully initialised when it
4006 * boots the new kernel. However, doing our init sequence with the CP and
4007 * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
4008 * do some quick sanity checks and restore sane values to avoid this
4009 * problem.
4010 */
4011void r100_restore_sanity(struct radeon_device *rdev)
4012{
4013 u32 tmp;
4014
4015 tmp = RREG32(RADEON_CP_CSQ_CNTL)r100_mm_rreg(rdev, (0x0740), 0);
12
Calling 'r100_mm_rreg'
16
Returning from 'r100_mm_rreg'
4016 if (tmp) {
17
Assuming 'tmp' is 0
18
Taking false branch
4017 WREG32(RADEON_CP_CSQ_CNTL, 0)r100_mm_wreg(rdev, (0x0740), (0), 0);
4018 }
4019 tmp = RREG32(RADEON_CP_RB_CNTL)r100_mm_rreg(rdev, (0x0704), 0);
19
Calling 'r100_mm_rreg'
22
Returning from 'r100_mm_rreg'
4020 if (tmp) {
23
Assuming 'tmp' is 0
24
Taking false branch
4021 WREG32(RADEON_CP_RB_CNTL, 0)r100_mm_wreg(rdev, (0x0704), (0), 0);
4022 }
4023 tmp = RREG32(RADEON_SCRATCH_UMSK)r100_mm_rreg(rdev, (0x0770), 0);
25
Calling 'r100_mm_rreg'
29
Returning from 'r100_mm_rreg'
4024 if (tmp) {
30
Assuming 'tmp' is 0
31
Taking false branch
4025 WREG32(RADEON_SCRATCH_UMSK, 0)r100_mm_wreg(rdev, (0x0770), (0), 0);
4026 }
4027}
32
Returning without writing to 'rdev->me_fw'
4028
4029int r100_init(struct radeon_device *rdev)
4030{
4031 int r;
4032
4033 /* Register debugfs file specific to this group of asics */
4034 r100_debugfs(rdev);
1
Calling 'r100_debugfs'
7
Returning from 'r100_debugfs'
4035 /* Disable VGA */
4036 r100_vga_render_disable(rdev);
8
Calling 'r100_vga_render_disable'
10
Returning from 'r100_vga_render_disable'
4037 /* Initialize scratch registers */
4038 radeon_scratch_init(rdev);
4039 /* Initialize surface registers */
4040 radeon_surface_init(rdev);
4041 /* sanity check some register to avoid hangs like after kexec */
4042 r100_restore_sanity(rdev);
11
Calling 'r100_restore_sanity'
33
Returning from 'r100_restore_sanity'
4043	/* TODO: disable VGA, need to use VGA request */
4044	/* BIOS */
4045 if (!radeon_get_bios(rdev)) {
34
Assuming the condition is false
35
Taking false branch
4046 if (ASIC_IS_AVIVO(rdev)((rdev->family >= CHIP_RS600)))
4047 return -EINVAL22;
4048 }
4049 if (rdev->is_atom_bios) {
36
Assuming field 'is_atom_bios' is false
37
Taking false branch
4050 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n")printf("drm:pid%d:%s *ERROR* " "Expecting combios for RS400/RS480 GPU\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4051 return -EINVAL22;
4052 } else {
4053 r = radeon_combios_init(rdev);
4054 if (r)
38
Assuming 'r' is 0
39
Taking false branch
4055 return r;
4056 }
4057 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
4058 if (radeon_asic_reset(rdev)(rdev)->asic->asic_reset((rdev), 0)) {
40
Assuming the condition is false
41
Taking false branch
4059 dev_warn(rdev->dev,printf("drm:pid%d:%s *WARNING* " "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r100_mm_rreg
(rdev, (0x000E40), 0), r100_mm_rreg(rdev, (0x0007C0), 0))
4060 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",printf("drm:pid%d:%s *WARNING* " "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r100_mm_rreg
(rdev, (0x000E40), 0), r100_mm_rreg(rdev, (0x0007C0), 0))
4061 RREG32(R_000E40_RBBM_STATUS),printf("drm:pid%d:%s *WARNING* " "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r100_mm_rreg
(rdev, (0x000E40), 0), r100_mm_rreg(rdev, (0x0007C0), 0))
4062 RREG32(R_0007C0_CP_STAT))printf("drm:pid%d:%s *WARNING* " "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__ , r100_mm_rreg
(rdev, (0x000E40), 0), r100_mm_rreg(rdev, (0x0007C0), 0))
;
4063 }
4064 /* check if cards are posted or not */
4065 if (radeon_boot_test_post_card(rdev) == false0)
42
Assuming the condition is false
43
Taking false branch
4066 return -EINVAL22;
4067 /* Set asic errata */
4068 r100_errata(rdev);
44
Calling 'r100_errata'
52
Returning from 'r100_errata'
4069 /* Initialize clocks */
4070 radeon_get_clock_info(rdev->ddev);
4071 /* initialize AGP */
4072 if (rdev->flags & RADEON_IS_AGP) {
53
Assuming the condition is false
54
Taking false branch
4073 r = radeon_agp_init(rdev);
4074 if (r) {
4075 radeon_agp_disable(rdev);
4076 }
4077 }
4078 /* initialize VRAM */
4079 r100_mc_init(rdev);
55
Calling 'r100_mc_init'
112
Returning from 'r100_mc_init'
4080 /* Fence driver */
4081 r = radeon_fence_driver_init(rdev);
4082 if (r)
113
Assuming 'r' is 0
114
Taking false branch
4083 return r;
4084 /* Memory manager */
4085 r = radeon_bo_init(rdev);
4086 if (r)
115
Assuming 'r' is 0
116
Taking false branch
4087 return r;
4088 if (rdev->flags & RADEON_IS_PCI) {
117
Assuming the condition is false
118
Taking false branch
4089 r = r100_pci_gart_init(rdev);
4090 if (r)
4091 return r;
4092 }
4093 r100_set_safe_registers(rdev);
4094
4095 /* Initialize power management */
4096 radeon_pm_init(rdev);
4097
4098 rdev->accel_working = true1;
4099 r = r100_startup(rdev);
119
Calling 'r100_startup'
4100 if (r) {
4101		/* Something went wrong with the accel init, stop accel */
4102 dev_err(rdev->dev, "Disabling GPU acceleration\n")printf("drm:pid%d:%s *ERROR* " "Disabling GPU acceleration\n"
, ({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})->ci_curproc->p_p->ps_pid, __func__)
;
4103 r100_cp_fini(rdev);
4104 radeon_wb_fini(rdev);
4105 radeon_ib_pool_fini(rdev);
4106 radeon_irq_kms_fini(rdev);
4107 if (rdev->flags & RADEON_IS_PCI)
4108 r100_pci_gart_fini(rdev);
4109 rdev->accel_working = false0;
4110 }
4111 return 0;
4112}
4113
4114uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
4115{
4116 unsigned long flags;
4117 uint32_t ret;
4118
4119 spin_lock_irqsave(&rdev->mmio_idx_lock, flags)do { flags = 0; mtx_enter(&rdev->mmio_idx_lock); } while
(0)
;
4120 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX)iowrite32(reg, ((void *)rdev->rmmio) + 0x0000);
4121 ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA)ioread32(((void *)rdev->rmmio) + 0x0004);
4122 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags)do { (void)(flags); mtx_leave(&rdev->mmio_idx_lock); }
while (0)
;
4123 return ret;
4124}
4125
4126void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v)
4127{
4128 unsigned long flags;
4129
4130 spin_lock_irqsave(&rdev->mmio_idx_lock, flags)do { flags = 0; mtx_enter(&rdev->mmio_idx_lock); } while
(0)
;
4131 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX)iowrite32(reg, ((void *)rdev->rmmio) + 0x0000);
4132 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA)iowrite32(v, ((void *)rdev->rmmio) + 0x0004);
4133 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags)do { (void)(flags); mtx_leave(&rdev->mmio_idx_lock); }
while (0)
;
4134}
4135
4136u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
4137{
4138 u32 val;
4139
4140 if (reg < rdev->rio_mem_size) {
4141 val = bus_space_read_4(rdev->iot, rdev->rio_mem, reg)((rdev->iot)->read_4((rdev->rio_mem), (reg)));
4142 bus_space_barrier(rdev->iot, rdev->rio_mem, 0,
4143 rdev->rio_mem_size, BUS_SPACE_BARRIER_READ0x01);
4144 } else {
4145 bus_space_barrier(rdev->iot, rdev->rio_mem, 0,
4146 rdev->rio_mem_size, BUS_SPACE_BARRIER_WRITE0x02);
4147 bus_space_write_4(rdev->iot, rdev->rio_mem,((rdev->iot)->write_4((rdev->rio_mem), (0x0000), (reg
)))
4148 RADEON_MM_INDEX, reg)((rdev->iot)->write_4((rdev->rio_mem), (0x0000), (reg
)))
;
4149 val = bus_space_read_4(rdev->iot, rdev->rio_mem,((rdev->iot)->read_4((rdev->rio_mem), (0x0004)))
4150 RADEON_MM_DATA)((rdev->iot)->read_4((rdev->rio_mem), (0x0004)));
4151 bus_space_barrier(rdev->iot, rdev->rio_mem, 0,
4152 rdev->rio_mem_size, BUS_SPACE_BARRIER_READ0x01);
4153 }
4154
4155 return val;
4156}
4157
4158void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
4159{
4160 if (reg < rdev->rio_mem_size) {
4161 bus_space_barrier(rdev->iot, rdev->rio_mem, 0,
4162 rdev->rio_mem_size, BUS_SPACE_BARRIER_WRITE0x02);
4163 bus_space_write_4(rdev->iot, rdev->rio_mem, reg, v)((rdev->iot)->write_4((rdev->rio_mem), (reg), (v)));
4164 } else {
4165 bus_space_barrier(rdev->iot, rdev->rio_mem, 0,
4166 rdev->rio_mem_size, BUS_SPACE_BARRIER_WRITE0x02);
4167 bus_space_write_4(rdev->iot, rdev->rio_mem,((rdev->iot)->write_4((rdev->rio_mem), (0x0000), (reg
)))
4168 RADEON_MM_INDEX, reg)((rdev->iot)->write_4((rdev->rio_mem), (0x0000), (reg
)))
;
4169 bus_space_barrier(rdev->iot, rdev->rio_mem, 0,
4170 rdev->rio_mem_size, BUS_SPACE_BARRIER_WRITE0x02);
4171 bus_space_write_4(rdev->iot, rdev->rio_mem,((rdev->iot)->write_4((rdev->rio_mem), (0x0004), (v)
))
4172 RADEON_MM_DATA, v)((rdev->iot)->write_4((rdev->rio_mem), (0x0004), (v)
))
;
4173 }
4174}

/usr/src/sys/dev/pci/drm/radeon/radeon.h

1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RADEON_H__
29#define __RADEON_H__
30
31/* TODO: Here are things that need to be done:
32 * - surface allocator & initializer : (bit like scratch reg) should
33 * initialize HDP_ stuff on RS600, R600, R700 hw, well anything
34 * related to surface
35 * - WB : write back stuff (do it bit like scratch reg things)
36 * - Vblank : look at Jesse's rework and what we should do
37 * - r600/r700: gart & cp
38 * - cs : clean cs ioctl use bitmap & things like that.
39 * - power management stuff
40 * - Barrier in gart code
41 * - Unmappable vram?
42 * - TESTING, TESTING, TESTING
43 */
44
45/* Initialization path:
46 * We expect that acceleration initialization might fail for various
47 * reasons even though we work hard to make it work on most
48 * configurations. In order to still have a working userspace in such
49 * a situation the init path must succeed up to the memory controller
50 * initialization point. Failures before this point are considered
51 * fatal errors. Here is the init callchain:
52 * radeon_device_init	performs common structure and mutex initialization
53 * asic_init		sets up the GPU memory layout and performs all
54 *			one-time initialization (failures in this
55 *			function are considered fatal)
56 * asic_startup	sets up the GPU acceleration; to follow
57 *			the guideline, the first thing this
58 *			function should do is set up the GPU
59 *			memory controller (only MC setup failures
60 *			are considered fatal)
61 */
62
63#include <linux/atomic.h>
64#include <linux/wait.h>
65#include <linux/list.h>
66#include <linux/kref.h>
67#include <linux/interval_tree.h>
68#include <linux/hashtable.h>
69#include <linux/dma-fence.h>
70
71#ifdef CONFIG_MMU_NOTIFIER
72#include <linux/mmu_notifier.h>
73#endif
74
75#include <drm/ttm/ttm_bo_api.h>
76#include <drm/ttm/ttm_bo_driver.h>
77#include <drm/ttm/ttm_placement.h>
78#include <drm/ttm/ttm_module.h>
79#include <drm/ttm/ttm_execbuf_util.h>
80
81#include <drm/drm_gem.h>
82#include <drm/drm_legacy.h>
83
84#include <dev/wscons/wsconsio.h>
85#include <dev/wscons/wsdisplayvar.h>
86#include <dev/rasops/rasops.h>
87
88#include <dev/pci/pcivar.h>
89
90#ifdef __sparc64__
91#include <machine/fbvar.h>
92#endif
93
94#include "radeon_family.h"
95#include "radeon_mode.h"
96#include "radeon_reg.h"
97
98/*
99 * Modules parameters.
100 */
101extern int radeon_no_wb;
102extern int radeon_modeset;
103extern int radeon_dynclks;
104extern int radeon_r4xx_atom;
105extern int radeon_agpmode;
106extern int radeon_vram_limit;
107extern int radeon_gart_size;
108extern int radeon_benchmarking;
109extern int radeon_testing;
110extern int radeon_connector_table;
111extern int radeon_tv;
112extern int radeon_audio;
113extern int radeon_disp_priority;
114extern int radeon_hw_i2c;
115extern int radeon_pcie_gen2;
116extern int radeon_msi;
117extern int radeon_lockup_timeout;
118extern int radeon_fastfb;
119extern int radeon_dpm;
120extern int radeon_aspm;
121extern int radeon_runtime_pm;
122extern int radeon_hard_reset;
123extern int radeon_vm_size;
124extern int radeon_vm_block_size;
125extern int radeon_deep_color;
126extern int radeon_use_pflipirq;
127extern int radeon_bapm;
128extern int radeon_backlight;
129extern int radeon_auxch;
130extern int radeon_mst;
131extern int radeon_uvd;
132extern int radeon_vce;
133extern int radeon_si_support;
134extern int radeon_cik_support;
135
136/*
137 * Copy from radeon_drv.h so we don't have to include both and have conflicting
138 * symbol;
139 */
140#define RADEON_MAX_USEC_TIMEOUT100000 100000 /* 100 ms */
141#define RADEON_FENCE_JIFFIES_TIMEOUT(hz / 2) (HZhz / 2)
142#define RADEON_USEC_IB_TEST_TIMEOUT1000000 1000000 /* 1s */
143/* RADEON_IB_POOL_SIZE must be a power of 2 */
144#define RADEON_IB_POOL_SIZE16 16
145#define RADEON_DEBUGFS_MAX_COMPONENTS32 32
146#define RADEONFB_CONN_LIMIT4 4
147#define RADEON_BIOS_NUM_SCRATCH8 8
148
149/* internal ring indices */
150/* r1xx+ has gfx CP ring */
151#define RADEON_RING_TYPE_GFX_INDEX0 0
152
153/* cayman has 2 compute CP rings */
154#define CAYMAN_RING_TYPE_CP1_INDEX1 1
155#define CAYMAN_RING_TYPE_CP2_INDEX2 2
156
157/* R600+ has an async dma ring */
158#define R600_RING_TYPE_DMA_INDEX3 3
159/* cayman add a second async dma ring */
160#define CAYMAN_RING_TYPE_DMA1_INDEX4 4
161
162/* R600+ */
163#define R600_RING_TYPE_UVD_INDEX5 5
164
165/* TN+ */
166#define TN_RING_TYPE_VCE1_INDEX6 6
167#define TN_RING_TYPE_VCE2_INDEX7 7
168
169/* max number of rings */
170#define RADEON_NUM_RINGS8 8
171
172/* number of hw syncs before falling back on blocking */
173#define RADEON_NUM_SYNCS4 4
174
175/* hardcode those limits for now */
176#define RADEON_VA_IB_OFFSET(1 << 20) (1 << 20)
177#define RADEON_VA_RESERVED_SIZE(8 << 20) (8 << 20)
178#define RADEON_IB_VM_MAX_SIZE(64 << 10) (64 << 10)
179
180/* hard reset data */
181#define RADEON_ASIC_RESET_DATA0x39d5e86b 0x39d5e86b
182
183/* reset flags */
184#define RADEON_RESET_GFX(1 << 0) (1 << 0)
185#define RADEON_RESET_COMPUTE(1 << 1) (1 << 1)
186#define RADEON_RESET_DMA(1 << 2) (1 << 2)
187#define RADEON_RESET_CP(1 << 3) (1 << 3)
188#define RADEON_RESET_GRBM(1 << 4) (1 << 4)
189#define RADEON_RESET_DMA1(1 << 5) (1 << 5)
190#define RADEON_RESET_RLC(1 << 6) (1 << 6)
191#define RADEON_RESET_SEM(1 << 7) (1 << 7)
192#define RADEON_RESET_IH(1 << 8) (1 << 8)
193#define RADEON_RESET_VMC(1 << 9) (1 << 9)
194#define RADEON_RESET_MC(1 << 10) (1 << 10)
195#define RADEON_RESET_DISPLAY(1 << 11) (1 << 11)
196
197/* CG block flags */
198#define RADEON_CG_BLOCK_GFX(1 << 0) (1 << 0)
199#define RADEON_CG_BLOCK_MC(1 << 1) (1 << 1)
200#define RADEON_CG_BLOCK_SDMA(1 << 2) (1 << 2)
201#define RADEON_CG_BLOCK_UVD(1 << 3) (1 << 3)
202#define RADEON_CG_BLOCK_VCE(1 << 4) (1 << 4)
203#define RADEON_CG_BLOCK_HDP(1 << 5) (1 << 5)
204#define RADEON_CG_BLOCK_BIF(1 << 6) (1 << 6)
205
206/* CG flags */
207#define RADEON_CG_SUPPORT_GFX_MGCG(1 << 0) (1 << 0)
208#define RADEON_CG_SUPPORT_GFX_MGLS(1 << 1) (1 << 1)
209#define RADEON_CG_SUPPORT_GFX_CGCG(1 << 2) (1 << 2)
210#define RADEON_CG_SUPPORT_GFX_CGLS(1 << 3) (1 << 3)
211#define RADEON_CG_SUPPORT_GFX_CGTS(1 << 4) (1 << 4)
212#define RADEON_CG_SUPPORT_GFX_CGTS_LS(1 << 5) (1 << 5)
213#define RADEON_CG_SUPPORT_GFX_CP_LS(1 << 6) (1 << 6)
214#define RADEON_CG_SUPPORT_GFX_RLC_LS(1 << 7) (1 << 7)
215#define RADEON_CG_SUPPORT_MC_LS(1 << 8) (1 << 8)
216#define RADEON_CG_SUPPORT_MC_MGCG(1 << 9) (1 << 9)
217#define RADEON_CG_SUPPORT_SDMA_LS(1 << 10) (1 << 10)
218#define RADEON_CG_SUPPORT_SDMA_MGCG(1 << 11) (1 << 11)
219#define RADEON_CG_SUPPORT_BIF_LS(1 << 12) (1 << 12)
220#define RADEON_CG_SUPPORT_UVD_MGCG(1 << 13) (1 << 13)
221#define RADEON_CG_SUPPORT_VCE_MGCG(1 << 14) (1 << 14)
222#define RADEON_CG_SUPPORT_HDP_LS(1 << 15) (1 << 15)
223#define RADEON_CG_SUPPORT_HDP_MGCG(1 << 16) (1 << 16)
224
225/* PG flags */
226#define RADEON_PG_SUPPORT_GFX_PG(1 << 0) (1 << 0)
227#define RADEON_PG_SUPPORT_GFX_SMG(1 << 1) (1 << 1)
228#define RADEON_PG_SUPPORT_GFX_DMG(1 << 2) (1 << 2)
229#define RADEON_PG_SUPPORT_UVD(1 << 3) (1 << 3)
230#define RADEON_PG_SUPPORT_VCE(1 << 4) (1 << 4)
231#define RADEON_PG_SUPPORT_CP(1 << 5) (1 << 5)
232#define RADEON_PG_SUPPORT_GDS(1 << 6) (1 << 6)
233#define RADEON_PG_SUPPORT_RLC_SMU_HS(1 << 7) (1 << 7)
234#define RADEON_PG_SUPPORT_SDMA(1 << 8) (1 << 8)
235#define RADEON_PG_SUPPORT_ACP(1 << 9) (1 << 9)
236#define RADEON_PG_SUPPORT_SAMU(1 << 10) (1 << 10)
237
238/* max cursor sizes (in pixels) */
239#define CURSOR_WIDTH64 64
240#define CURSOR_HEIGHT64 64
241
242#define CIK_CURSOR_WIDTH128 128
243#define CIK_CURSOR_HEIGHT128 128
244
245/*
246 * Errata workarounds.
247 */
248enum radeon_pll_errata {
249 CHIP_ERRATA_R300_CG = 0x00000001,
250 CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
251 CHIP_ERRATA_PLL_DELAY = 0x00000004
252};
253
254
255struct radeon_device;
256
257
258/*
259 * BIOS.
260 */
261bool_Bool radeon_get_bios(struct radeon_device *rdev);
262
263/*
264 * Dummy page
265 */
266struct radeon_dummy_page {
267 uint64_t entry;
268 struct drm_dmamem *dmah;
269 dma_addr_t addr;
270};
271int radeon_dummy_page_init(struct radeon_device *rdev);
272void radeon_dummy_page_fini(struct radeon_device *rdev);
273
274
275/*
276 * Clocks
277 */
278struct radeon_clock {
279 struct radeon_pll p1pll;
280 struct radeon_pll p2pll;
281 struct radeon_pll dcpll;
282 struct radeon_pll spll;
283 struct radeon_pll mpll;
284 /* 10 Khz units */
285 uint32_t default_mclk;
286 uint32_t default_sclk;
287 uint32_t default_dispclk;
288 uint32_t current_dispclk;
289 uint32_t dp_extclk;
290 uint32_t max_pixel_clock;
291 uint32_t vco_freq;
292};
293
294/*
295 * Power management
296 */
297int radeon_pm_init(struct radeon_device *rdev);
298int radeon_pm_late_init(struct radeon_device *rdev);
299void radeon_pm_fini(struct radeon_device *rdev);
300void radeon_pm_compute_clocks(struct radeon_device *rdev);
301void radeon_pm_suspend(struct radeon_device *rdev);
302void radeon_pm_resume(struct radeon_device *rdev);
303void radeon_combios_get_power_modes(struct radeon_device *rdev);
304void radeon_atombios_get_power_modes(struct radeon_device *rdev);
305int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
306 u8 clock_type,
307 u32 clock,
308 bool_Bool strobe_mode,
309 struct atom_clock_dividers *dividers);
310int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
311 u32 clock,
312 bool_Bool strobe_mode,
313 struct atom_mpll_param *mpll_param);
314void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
315int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
316 u16 voltage_level, u8 voltage_type,
317 u32 *gpio_value, u32 *gpio_mask);
318void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
319 u32 eng_clock, u32 mem_clock);
320int radeon_atom_get_voltage_step(struct radeon_device *rdev,
321 u8 voltage_type, u16 *voltage_step);
322int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
323 u16 voltage_id, u16 *voltage);
324int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
325 u16 *voltage,
326 u16 leakage_idx);
327int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
328 u16 *leakage_id);
329int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
330 u16 *vddc, u16 *vddci,
331 u16 virtual_voltage_id,
332 u16 vbios_voltage_id);
333int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
334 u16 virtual_voltage_id,
335 u16 *voltage);
336int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
337 u8 voltage_type,
338 u16 nominal_voltage,
339 u16 *true_voltage);
340int radeon_atom_get_min_voltage(struct radeon_device *rdev,
341 u8 voltage_type, u16 *min_voltage);
342int radeon_atom_get_max_voltage(struct radeon_device *rdev,
343 u8 voltage_type, u16 *max_voltage);
344int radeon_atom_get_voltage_table(struct radeon_device *rdev,
345 u8 voltage_type, u8 voltage_mode,
346 struct atom_voltage_table *voltage_table);
347bool_Bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
348 u8 voltage_type, u8 voltage_mode);
349int radeon_atom_get_svi2_info(struct radeon_device *rdev,
350 u8 voltage_type,
351 u8 *svd_gpio_id, u8 *svc_gpio_id);
352void radeon_atom_update_memory_dll(struct radeon_device *rdev,
353 u32 mem_clock);
354void radeon_atom_set_ac_timing(struct radeon_device *rdev,
355 u32 mem_clock);
356int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
357 u8 module_index,
358 struct atom_mc_reg_table *reg_table);
359int radeon_atom_get_memory_info(struct radeon_device *rdev,
360 u8 module_index, struct atom_memory_info *mem_info);
361int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
362 bool_Bool gddr5, u8 module_index,
363 struct atom_memory_clock_range_table *mclk_range_table);
364int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
365 u16 voltage_id, u16 *voltage);
366void rs690_pm_info(struct radeon_device *rdev);
367extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
368 unsigned *bankh, unsigned *mtaspect,
369 unsigned *tile_split);
370
371/*
372 * Fences.
373 */
374struct radeon_fence_driver {
375 struct radeon_device *rdev;
376 uint32_t scratch_reg;
377 uint64_t gpu_addr;
378 volatile uint32_t *cpu_addr;
379 /* sync_seq is protected by ring emission lock */
380 uint64_t sync_seq[RADEON_NUM_RINGS8];
381 atomic64_t last_seq;
382 bool_Bool initialized, delayed_irq;
383 struct delayed_work lockup_work;
384};
385
386struct radeon_fence {
387 struct dma_fence base;
388
389 struct radeon_device *rdev;
390 uint64_t seq;
391 /* RB, DMA, etc. */
392 unsigned ring;
393 bool_Bool is_vm_update;
394
395 wait_queue_entry_t fence_wake;
396};
397
398int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
399int radeon_fence_driver_init(struct radeon_device *rdev);
400void radeon_fence_driver_fini(struct radeon_device *rdev);
401void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
402int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
403void radeon_fence_process(struct radeon_device *rdev, int ring);
404bool_Bool radeon_fence_signaled(struct radeon_fence *fence);
405long radeon_fence_wait_timeout(struct radeon_fence *fence, bool_Bool interruptible, long timeout);
406int radeon_fence_wait(struct radeon_fence *fence, bool_Bool interruptible);
407int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
408int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
409int radeon_fence_wait_any(struct radeon_device *rdev,
410 struct radeon_fence **fences,
411 bool_Bool intr);
412struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
413void radeon_fence_unref(struct radeon_fence **fence);
414unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
415bool_Bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
416void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
417static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
418 struct radeon_fence *b)
419{
420 if (!a) {
421 return b;
422 }
423
424 if (!b) {
425 return a;
426 }
427
428 BUG_ON(a->ring != b->ring)((!(a->ring != b->ring)) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/dev/pci/drm/radeon/radeon.h", 428, "!(a->ring != b->ring)"
))
;
429
430 if (a->seq > b->seq) {
431 return a;
432 } else {
433 return b;
434 }
435}
436
437static inline bool_Bool radeon_fence_is_earlier(struct radeon_fence *a,
438 struct radeon_fence *b)
439{
440 if (!a) {
441 return false0;
442 }
443
444 if (!b) {
445 return true1;
446 }
447
448 BUG_ON(a->ring != b->ring)((!(a->ring != b->ring)) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/dev/pci/drm/radeon/radeon.h", 448, "!(a->ring != b->ring)"
))
;
449
450 return a->seq < b->seq;
451}
452
453/*
454 * Tiling registers
455 */
456struct radeon_surface_reg {
457 struct radeon_bo *bo;
458};
459
460#define RADEON_GEM_MAX_SURFACES8 8
461
462/*
463 * TTM.
464 */
465struct radeon_mman {
466 struct ttm_bo_device bdev;
467 bool_Bool initialized;
468
469#if defined(CONFIG_DEBUG_FS)
470 struct dentry *vram;
471 struct dentry *gtt;
472#endif
473};
474
475struct radeon_bo_list {
476 struct radeon_bo *robj;
477 struct ttm_validate_buffer tv;
478 uint64_t gpu_offset;
479 unsigned preferred_domains;
480 unsigned allowed_domains;
481 uint32_t tiling_flags;
482};
483
484/* bo virtual address in a specific vm */
485struct radeon_bo_va {
486 /* protected by bo being reserved */
487 struct list_head bo_list;
488 uint32_t flags;
489 struct radeon_fence *last_pt_update;
490 unsigned ref_count;
491
492 /* protected by vm mutex */
493 struct interval_tree_node it;
494 struct list_head vm_status;
495
496 /* constant after initialization */
497 struct radeon_vm *vm;
498 struct radeon_bo *bo;
499};
500
501struct radeon_bo {
502 /* Protected by gem.mutex */
503 struct list_head list;
504 /* Protected by tbo.reserved */
505 u32 initial_domain;
506 struct ttm_place placements[4];
507 struct ttm_placement placement;
508 struct ttm_buffer_object tbo;
509 struct ttm_bo_kmap_obj kmap;
510 u32 flags;
511 unsigned pin_count;
512 void *kptr;
513 u32 tiling_flags;
514 u32 pitch;
515 int surface_reg;
516 unsigned prime_shared_count;
517	/* list of all virtual addresses to which this bo
518	 * is associated
519 */
520 struct list_head va;
521 /* Constant after initialization */
522 struct radeon_device *rdev;
523
524 struct ttm_bo_kmap_obj dma_buf_vmap;
525 pid_t pid;
526
527#ifdef CONFIG_MMU_NOTIFIER
528 struct mmu_interval_notifier notifier;
529#endif
530};
531#define gem_to_radeon_bo(gobj)({ const __typeof( ((struct radeon_bo *)0)->tbo.base ) *__mptr
= ((gobj)); (struct radeon_bo *)( (char *)__mptr - __builtin_offsetof
(struct radeon_bo, tbo.base) );})
container_of((gobj), struct radeon_bo, tbo.base)({ const __typeof( ((struct radeon_bo *)0)->tbo.base ) *__mptr
= ((gobj)); (struct radeon_bo *)( (char *)__mptr - __builtin_offsetof
(struct radeon_bo, tbo.base) );})
532
533int radeon_gem_debugfs_init(struct radeon_device *rdev);
534
535/* sub-allocation manager, it has to be protected by another lock.
536 * By design this is a helper for other parts of the driver
537 * like the indirect buffer or semaphore, which both have their
538 * own locking.
539 *
540 * The principle is simple: we keep a list of sub-allocations in offset
541 * order (first entry has offset == 0, last entry has the highest
542 * offset).
543 *
544 * When allocating a new object we first check if there is room at
545 * the end: total_size - (last_object_offset + last_object_size) >=
546 * alloc_size. If so we allocate the new object there.
547 *
548 * When there is not enough room at the end, we start waiting for
549 * each sub object until we reach object_offset+object_size >=
550 * alloc_size; that object then becomes the sub object we return.
551 *
552 * Alignment can't be bigger than page size.
553 *
554 * Holes are not considered for allocation to keep things simple.
555 * The assumption is that there won't be holes (all objects on the same
556 * alignment).
557 */
558struct radeon_sa_manager {
559 wait_queue_head_t wq;
560 struct radeon_bo *bo;
561 struct list_head *hole;
562 struct list_head flist[RADEON_NUM_RINGS8];
563 struct list_head olist;
564 unsigned size;
565 uint64_t gpu_addr;
566 void *cpu_ptr;
567 uint32_t domain;
568 uint32_t align;
569};
570
571struct radeon_sa_bo;
572
573/* sub-allocation buffer */
574struct radeon_sa_bo {
575 struct list_head olist;
576 struct list_head flist;
577 struct radeon_sa_manager *manager;
578 unsigned soffset;
579 unsigned eoffset;
580 struct radeon_fence *fence;
581};
582
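A minimal sketch of the end-of-buffer check described in the sub-allocation comment above; sa_fits_at_end() is a hypothetical helper written for illustration, not part of the driver:

/* Hypothetical illustration of the rule "allocate at the end when
 * total_size - (last_object_offset + last_object_size) >= alloc_size";
 * eoffset is a sub object's end offset (offset + size). */
static inline bool sa_fits_at_end(struct radeon_sa_manager *m,
				  unsigned alloc_size)
{
	struct radeon_sa_bo *last;

	if (list_empty(&m->olist))
		return m->size >= alloc_size;	/* whole buffer is free */
	last = list_last_entry(&m->olist, struct radeon_sa_bo, olist);
	return m->size - last->eoffset >= alloc_size;
}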
583/*
584 * GEM objects.
585 */
586struct radeon_gem {
587 struct rwlock mutex;
588 struct list_head objects;
589};
590
591int radeon_gem_init(struct radeon_device *rdev);
592void radeon_gem_fini(struct radeon_device *rdev);
593int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
594 int alignment, int initial_domain,
595 u32 flags, bool_Bool kernel,
596 struct drm_gem_object **obj);
597
598int radeon_mode_dumb_create(struct drm_file *file_priv,
599 struct drm_device *dev,
600 struct drm_mode_create_dumb *args);
601int radeon_mode_dumb_mmap(struct drm_file *filp,
602 struct drm_device *dev,
603 uint32_t handle, uint64_t *offset_p);
604
605/*
606 * Semaphores.
607 */
608struct radeon_semaphore {
609 struct radeon_sa_bo *sa_bo;
610 signed waiters;
611 uint64_t gpu_addr;
612};
613
614int radeon_semaphore_create(struct radeon_device *rdev,
615 struct radeon_semaphore **semaphore);
616bool_Bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
617 struct radeon_semaphore *semaphore);
618bool_Bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
619 struct radeon_semaphore *semaphore);
620void radeon_semaphore_free(struct radeon_device *rdev,
621 struct radeon_semaphore **semaphore,
622 struct radeon_fence *fence);
623
624/*
625 * Synchronization
626 */
627struct radeon_sync {
628 struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS4];
629 struct radeon_fence *sync_to[RADEON_NUM_RINGS8];
630 struct radeon_fence *last_vm_update;
631};
632
633void radeon_sync_create(struct radeon_sync *sync);
634void radeon_sync_fence(struct radeon_sync *sync,
635 struct radeon_fence *fence);
636int radeon_sync_resv(struct radeon_device *rdev,
637 struct radeon_sync *sync,
638 struct dma_resv *resv,
639 bool_Bool shared);
640int radeon_sync_rings(struct radeon_device *rdev,
641 struct radeon_sync *sync,
642 int waiting_ring);
643void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
644 struct radeon_fence *fence);
645
646/*
647 * GART structures, functions & helpers
648 */
649struct radeon_mc;
650
651#define RADEON_GPU_PAGE_SIZE4096 4096
652#define RADEON_GPU_PAGE_MASK(4096 - 1) (RADEON_GPU_PAGE_SIZE4096 - 1)
653#define RADEON_GPU_PAGE_SHIFT12 12
654#define RADEON_GPU_PAGE_ALIGN(a)(((a) + (4096 - 1)) & ~(4096 - 1)) (((a) + RADEON_GPU_PAGE_MASK(4096 - 1)) & ~RADEON_GPU_PAGE_MASK(4096 - 1))
655
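RADEON_GPU_PAGE_ALIGN above is the usual power-of-two round-up. A worked example with a hypothetical size:

/* (a + (4096 - 1)) & ~(4096 - 1) rounds a up to the next 4096-byte
 * boundary, valid because the page size is a power of two. For a = 5000:
 *   (5000 + 4095) & ~4095 == 9095 & ~4095 == 8192, i.e. two GPU pages. */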
656#define RADEON_GART_PAGE_DUMMY0 0
657#define RADEON_GART_PAGE_VALID(1 << 0) (1 << 0)
658#define RADEON_GART_PAGE_READ(1 << 1) (1 << 1)
659#define RADEON_GART_PAGE_WRITE(1 << 2) (1 << 2)
660#define RADEON_GART_PAGE_SNOOP(1 << 3) (1 << 3)
661
662struct radeon_gart {
663 dma_addr_t table_addr;
664 struct drm_dmamem *dmah;
665 struct radeon_bo *robj;
666 void *ptr;
667 unsigned num_gpu_pages;
668 unsigned num_cpu_pages;
669 unsigned table_size;
670 struct vm_page **pages;
671 uint64_t *pages_entry;
672 bool_Bool ready;
673};
674
675int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
676void radeon_gart_table_ram_free(struct radeon_device *rdev);
677int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
678void radeon_gart_table_vram_free(struct radeon_device *rdev);
679int radeon_gart_table_vram_pin(struct radeon_device *rdev);
680void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
681int radeon_gart_init(struct radeon_device *rdev);
682void radeon_gart_fini(struct radeon_device *rdev);
683void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
684 int pages);
685int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
686 int pages, struct vm_page **pagelist,
687 dma_addr_t *dma_addr, uint32_t flags);
688
689
690/*
691 * GPU MC structures, functions & helpers
692 */
693struct radeon_mc {
694 resource_size_t aper_size;
695 resource_size_t aper_base;
696 resource_size_t agp_base;
697 /* for some chips with <= 32MB we need to lie
698 * about vram size near mc fb location */
699 u64 mc_vram_size;
700 u64 visible_vram_size;
701 u64 gtt_size;
702 u64 gtt_start;
703 u64 gtt_end;
704 u64 vram_start;
705 u64 vram_end;
706 unsigned vram_width;
707 u64 real_vram_size;
708 int vram_mtrr;
709 bool_Bool vram_is_ddr;
710 bool_Bool igp_sideport_enabled;
711 u64 gtt_base_align;
712 u64 mc_mask;
713};
714
715bool_Bool radeon_combios_sideport_present(struct radeon_device *rdev);
716bool_Bool radeon_atombios_sideport_present(struct radeon_device *rdev);
717
718/*
719 * GPU scratch registers structures, functions & helpers
720 */
721struct radeon_scratch {
722 unsigned num_reg;
723 uint32_t reg_base;
724 bool_Bool free[32];
725 uint32_t reg[32];
726};
727
728int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
729void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
730
731/*
732 * GPU doorbell structures, functions & helpers
733 */
734#define RADEON_MAX_DOORBELLS1024 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */
735
736struct radeon_doorbell {
737 /* doorbell mmio */
738 resource_size_t base;
739 resource_size_t size;
740 u32 __iomem *ptr;
741 bus_space_handle_t bsh;
742 u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
743 DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS)unsigned long used[((((1024)) + ((8 * sizeof(long)) - 1)) / (
8 * sizeof(long)))];
;
744};
745
746int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
747void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
748
749/*
750 * IRQS.
751 */
752
753struct radeon_flip_work {
754 struct work_struct flip_work;
755 struct work_struct unpin_work;
756 struct radeon_device *rdev;
757 int crtc_id;
758 u32 target_vblank;
759 uint64_t base;
760 struct drm_pending_vblank_event *event;
761 struct radeon_bo *old_rbo;
762 struct dma_fence *fence;
763 bool_Bool async;
764};
765
766struct r500_irq_stat_regs {
767 u32 disp_int;
768 u32 hdmi0_status;
769};
770
771struct r600_irq_stat_regs {
772 u32 disp_int;
773 u32 disp_int_cont;
774 u32 disp_int_cont2;
775 u32 d1grph_int;
776 u32 d2grph_int;
777 u32 hdmi0_status;
778 u32 hdmi1_status;
779};
780
781struct evergreen_irq_stat_regs {
782 u32 disp_int[6];
783 u32 grph_int[6];
784 u32 afmt_status[6];
785};
786
787struct cik_irq_stat_regs {
788 u32 disp_int;
789 u32 disp_int_cont;
790 u32 disp_int_cont2;
791 u32 disp_int_cont3;
792 u32 disp_int_cont4;
793 u32 disp_int_cont5;
794 u32 disp_int_cont6;
795 u32 d1grph_int;
796 u32 d2grph_int;
797 u32 d3grph_int;
798 u32 d4grph_int;
799 u32 d5grph_int;
800 u32 d6grph_int;
801};
802
803union radeon_irq_stat_regs {
804 struct r500_irq_stat_regs r500;
805 struct r600_irq_stat_regs r600;
806 struct evergreen_irq_stat_regs evergreen;
807 struct cik_irq_stat_regs cik;
808};
809
810struct radeon_irq {
811 bool_Bool installed;
812 spinlock_t lock;
813 atomic_t ring_int[RADEON_NUM_RINGS8];
814 bool_Bool crtc_vblank_int[RADEON_MAX_CRTCS6];
815 atomic_t pflip[RADEON_MAX_CRTCS6];
816 wait_queue_head_t vblank_queue;
817 bool_Bool hpd[RADEON_MAX_HPD_PINS7];
818 bool_Bool afmt[RADEON_MAX_AFMT_BLOCKS7];
819 union radeon_irq_stat_regs stat_regs;
820 bool_Bool dpm_thermal;
821};
822
823int radeon_irq_kms_init(struct radeon_device *rdev);
824void radeon_irq_kms_fini(struct radeon_device *rdev);
825void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
826bool_Bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
827void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
828void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
829void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
830void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
831void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
832void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
833void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
834
835/*
836 * CP & rings.
837 */
838
839struct radeon_ib {
840 struct radeon_sa_bo *sa_bo;
841 uint32_t length_dw;
842 uint64_t gpu_addr;
843 uint32_t *ptr;
844 int ring;
845 struct radeon_fence *fence;
846 struct radeon_vm *vm;
847 bool_Bool is_const_ib;
848 struct radeon_sync sync;
849};
850
851struct radeon_ring {
852 struct radeon_bo *ring_obj;
853 volatile uint32_t *ring;
854 unsigned rptr_offs;
855 unsigned rptr_save_reg;
856 u64 next_rptr_gpu_addr;
857 volatile u32 *next_rptr_cpu_addr;
858 unsigned wptr;
859 unsigned wptr_old;
860 unsigned ring_size;
861 unsigned ring_free_dw;
862 int count_dw;
863 atomic_t last_rptr;
864 atomic64_t last_activity;
865 uint64_t gpu_addr;
866 uint32_t align_mask;
867 uint32_t ptr_mask;
868 bool_Bool ready;
869 u32 nop;
870 u32 idx;
871 u64 last_semaphore_signal_addr;
872 u64 last_semaphore_wait_addr;
873 /* for CIK queues */
874 u32 me;
875 u32 pipe;
876 u32 queue;
877 struct radeon_bo *mqd_obj;
878 u32 doorbell_index;
879 unsigned wptr_offs;
880};
881
882struct radeon_mec {
883 struct radeon_bo *hpd_eop_obj;
884 u64 hpd_eop_gpu_addr;
885 u32 num_pipe;
886 u32 num_mec;
887 u32 num_queue;
888};
889
890/*
891 * VM
892 */
893
894/* maximum number of VMIDs */
895#define RADEON_NUM_VM16 16
896
897/* number of entries in page table */
898#define RADEON_VM_PTE_COUNT(1 << radeon_vm_block_size) (1 << radeon_vm_block_size)
899
900/* PTBs (Page Table Blocks) need to be aligned to 32K */
901#define RADEON_VM_PTB_ALIGN_SIZE32768 32768
902#define RADEON_VM_PTB_ALIGN_MASK(32768 - 1) (RADEON_VM_PTB_ALIGN_SIZE32768 - 1)
903#define RADEON_VM_PTB_ALIGN(a)(((a) + (32768 - 1)) & ~(32768 - 1)) (((a) + RADEON_VM_PTB_ALIGN_MASK(32768 - 1)) & ~RADEON_VM_PTB_ALIGN_MASK(32768 - 1))
904
905#define R600_PTE_VALID(1 << 0) (1 << 0)
906#define R600_PTE_SYSTEM(1 << 1) (1 << 1)
907#define R600_PTE_SNOOPED(1 << 2) (1 << 2)
908#define R600_PTE_READABLE(1 << 5) (1 << 5)
909#define R600_PTE_WRITEABLE(1 << 6) (1 << 6)
910
911/* PTE (Page Table Entry) fragment field for different page sizes */
912#define R600_PTE_FRAG_4KB(0 << 7) (0 << 7)
913#define R600_PTE_FRAG_64KB(4 << 7) (4 << 7)
914#define R600_PTE_FRAG_256KB(6 << 7) (6 << 7)
915
916/* flags needed to be set so we can copy directly from the GART table */
917#define R600_PTE_GART_MASK ( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
918 R600_PTE_SYSTEM | R600_PTE_VALID )
919
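A page table entry that can be copied straight from the GART table carries exactly the four bits combined in R600_PTE_GART_MASK; for illustration (not driver code):

 uint64_t gart_pte_flags = R600_PTE_VALID | R600_PTE_SYSTEM |
                           R600_PTE_READABLE | R600_PTE_WRITEABLE; /* == 0x63 */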
920struct radeon_vm_pt {
921 struct radeon_bo *bo;
922 uint64_t addr;
923};
924
925struct radeon_vm_id {
926 unsigned id;
927 uint64_t pd_gpu_addr;
928 /* last flushed PD/PT update */
929 struct radeon_fence *flushed_updates;
930 /* last use of vmid */
931 struct radeon_fence *last_id_use;
932};
933
934struct radeon_vm {
935 struct rwlock mutex;
936
937 struct rb_root_cached va;
938
939 /* protecting invalidated and freed */
940 spinlock_t status_lock;
941
942 /* BOs moved, but not yet updated in the PT */
943 struct list_head invalidated;
944
945 /* BOs freed, but not yet updated in the PT */
946 struct list_head freed;
947
948 /* BOs cleared in the PT */
949 struct list_head cleared;
950
951 /* contains the page directory */
952 struct radeon_bo *page_directory;
953 unsigned max_pde_used;
954
955 /* array of page tables, one for each page directory entry */
956 struct radeon_vm_pt *page_tables;
957
958 struct radeon_bo_va *ib_bo_va;
959
960 /* for id and flush management per ring */
961 struct radeon_vm_id ids[RADEON_NUM_RINGS];
962};
963
964struct radeon_vm_manager {
965 struct radeon_fence *active[RADEON_NUM_VM];
966 uint32_t max_pfn;
967 /* number of VMIDs */
968 unsigned nvm;
969 /* vram base address for page table entry */
970 u64 vram_base_offset;
971 /* is vm enabled? */
972 bool enabled;
973 /* for hw to save the PD addr on suspend/resume */
974 uint32_t saved_table_addr[RADEON_NUM_VM];
975};
976
977/*
978 * file private structure
979 */
980struct radeon_fpriv {
981 struct radeon_vm vm;
982};
983
984/*
985 * R6xx+ IH ring
986 */
987struct r600_ih {
988 struct radeon_bo *ring_obj;
989 volatile uint32_t *ring;
990 unsigned rptr;
991 unsigned ring_size;
992 uint64_t gpu_addr;
993 uint32_t ptr_mask;
994 atomic_t lock;
995 bool enabled;
996};
997
998/*
999 * RLC stuff
1000 */
1001#include "clearstate_defs.h"
1002
1003struct radeon_rlc {
1004 /* for power gating */
1005 struct radeon_bo *save_restore_obj;
1006 uint64_t save_restore_gpu_addr;
1007 volatile uint32_t *sr_ptr;
1008 const u32 *reg_list;
1009 u32 reg_list_size;
1010 /* for clear state */
1011 struct radeon_bo *clear_state_obj;
1012 uint64_t clear_state_gpu_addr;
1013 volatile uint32_t *cs_ptr;
1014 const struct cs_section_def *cs_data;
1015 u32 clear_state_size;
1016 /* for cp tables */
1017 struct radeon_bo *cp_table_obj;
1018 uint64_t cp_table_gpu_addr;
1019 volatile uint32_t *cp_table_ptr;
1020 u32 cp_table_size;
1021};
1022
1023int radeon_ib_get(struct radeon_device *rdev, int ring,
1024 struct radeon_ib *ib, struct radeon_vm *vm,
1025 unsigned size);
1026void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
1027int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
1028 struct radeon_ib *const_ib, bool hdp_flush);
1029int radeon_ib_pool_init(struct radeon_device *rdev);
1030void radeon_ib_pool_fini(struct radeon_device *rdev);
1031int radeon_ib_ring_tests(struct radeon_device *rdev);
1032/* Ring access between begin & end cannot sleep */
1033bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
1034 struct radeon_ring *ring);
1035void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
1036int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
1037int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
1038void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
1039 bool hdp_flush);
1040void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
1041 bool hdp_flush);
1042void radeon_ring_undo(struct radeon_ring *ring);
1043void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
1044int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
1045void radeon_ring_lockup_update(struct radeon_device *rdev,
1046 struct radeon_ring *ring);
1047bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
1048unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
1049 uint32_t **data);
1050int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
1051 unsigned size, uint32_t *data);
1052int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
1053 unsigned rptr_offs, u32 nop);
1054void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
1055
1056
1057/* r600 async dma */
1058void r600_dma_stop(struct radeon_device *rdev);
1059int r600_dma_resume(struct radeon_device *rdev);
1060void r600_dma_fini(struct radeon_device *rdev);
1061
1062void cayman_dma_stop(struct radeon_device *rdev);
1063int cayman_dma_resume(struct radeon_device *rdev);
1064void cayman_dma_fini(struct radeon_device *rdev);
1065
1066/*
1067 * CS.
1068 */
1069struct radeon_cs_chunk {
1070 uint32_t length_dw;
1071 uint32_t *kdata;
1072 void __user *user_ptr;
1073};
1074
1075struct radeon_cs_parser {
1076 struct device *dev;
1077 struct radeon_device *rdev;
1078 struct drm_file *filp;
1079 /* chunks */
1080 unsigned nchunks;
1081 struct radeon_cs_chunk *chunks;
1082 uint64_t *chunks_array;
1083 /* IB */
1084 unsigned idx;
1085 /* relocations */
1086 unsigned nrelocs;
1087 struct radeon_bo_list *relocs;
1088 struct radeon_bo_list *vm_bos;
1089 struct list_head validated;
1090 unsigned dma_reloc_idx;
1091 /* indices of various chunks */
1092 struct radeon_cs_chunk *chunk_ib;
1093 struct radeon_cs_chunk *chunk_relocs;
1094 struct radeon_cs_chunk *chunk_flags;
1095 struct radeon_cs_chunk *chunk_const_ib;
1096 struct radeon_ib ib;
1097 struct radeon_ib const_ib;
1098 void *track;
1099 unsigned family;
1100 int parser_error;
1101 u32 cs_flags;
1102 u32 ring;
1103 s32 priority;
1104 struct ww_acquire_ctx ticket;
1105};
1106
1107static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
1108{
1109 struct radeon_cs_chunk *ibc = p->chunk_ib;
1110
1111 if (ibc->kdata)
1112 return ibc->kdata[idx];
1113 return p->ib.ptr[idx];
1114}
1115
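radeon_get_ib_value() reads from the kernel copy of the IB chunk when one exists and from the mapped IB otherwise, so command-stream checkers can index payload dwords uniformly; a hedged usage sketch (pkt being a radeon_cs_packet as declared below):

 u32 first_payload = radeon_get_ib_value(p, pkt->idx + 1);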
1116
1117struct radeon_cs_packet {
1118 unsigned idx;
1119 unsigned type;
1120 unsigned reg;
1121 unsigned opcode;
1122 int count;
1123 unsigned one_reg_wr;
1124};
1125
1126typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
1127 struct radeon_cs_packet *pkt,
1128 unsigned idx, unsigned reg);
1129typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
1130 struct radeon_cs_packet *pkt);
1131
1132
1133/*
1134 * AGP
1135 */
1136int radeon_agp_init(struct radeon_device *rdev);
1137void radeon_agp_resume(struct radeon_device *rdev);
1138void radeon_agp_suspend(struct radeon_device *rdev);
1139void radeon_agp_fini(struct radeon_device *rdev);
1140
1141
1142/*
1143 * Writeback
1144 */
1145struct radeon_wb {
1146 struct radeon_bo *wb_obj;
1147 volatile uint32_t *wb;
1148 uint64_t gpu_addr;
1149 bool enabled;
1150 bool use_event;
1151};
1152
1153#define RADEON_WB_SCRATCH_OFFSET 0
1154#define RADEON_WB_RING0_NEXT_RPTR 256
1155#define RADEON_WB_CP_RPTR_OFFSET 1024
1156#define RADEON_WB_CP1_RPTR_OFFSET 1280
1157#define RADEON_WB_CP2_RPTR_OFFSET 1536
1158#define R600_WB_DMA_RPTR_OFFSET 1792
1159#define R600_WB_IH_WPTR_OFFSET 2048
1160#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
1161#define R600_WB_EVENT_OFFSET 3072
1162#define CIK_WB_CP1_WPTR_OFFSET 3328
1163#define CIK_WB_CP2_WPTR_OFFSET 3584
1164#define R600_WB_DMA_RING_TEST_OFFSET 3588
1165#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
1166
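Each of these constants is a byte offset into the single writeback page, so consumers index the mapped u32 array at offset/4; a hedged sketch of the access pattern:

 u32 rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET / 4]; /* CP ring 0 read pointer */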
1167/**
1168 * struct radeon_pm - power management data
1169 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
1170 * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880)
1171 * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880)
1172 * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880)
1173 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
1174 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
1175 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
1176 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
1177 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
1178 * @sclk: GPU clock Mhz (core bandwidth depends of this clock)
1179 * @needed_bandwidth: current bandwidth needs
1180 *
1181 * It keeps track of various data needed to make power management decisions.
1182 * Bandwidth need is used to determine the minimum clocks of the GPU and memory.
1183 * The relation between gpu/memory clock and available bandwidth is hw dependent
1184 * (type of memory, bus size, efficiency, ...)
1185 */
1186
1187enum radeon_pm_method {
1188 PM_METHOD_PROFILE,
1189 PM_METHOD_DYNPM,
1190 PM_METHOD_DPM,
1191};
1192
1193enum radeon_dynpm_state {
1194 DYNPM_STATE_DISABLED,
1195 DYNPM_STATE_MINIMUM,
1196 DYNPM_STATE_PAUSED,
1197 DYNPM_STATE_ACTIVE,
1198 DYNPM_STATE_SUSPENDED,
1199};
1200enum radeon_dynpm_action {
1201 DYNPM_ACTION_NONE,
1202 DYNPM_ACTION_MINIMUM,
1203 DYNPM_ACTION_DOWNCLOCK,
1204 DYNPM_ACTION_UPCLOCK,
1205 DYNPM_ACTION_DEFAULT
1206};
1207
1208enum radeon_voltage_type {
1209 VOLTAGE_NONE = 0,
1210 VOLTAGE_GPIO,
1211 VOLTAGE_VDDC,
1212 VOLTAGE_SW
1213};
1214
1215enum radeon_pm_state_type {
1216 /* not used for dpm */
1217 POWER_STATE_TYPE_DEFAULT,
1218 POWER_STATE_TYPE_POWERSAVE,
1219 /* user selectable states */
1220 POWER_STATE_TYPE_BATTERY,
1221 POWER_STATE_TYPE_BALANCED,
1222 POWER_STATE_TYPE_PERFORMANCE,
1223 /* internal states */
1224 POWER_STATE_TYPE_INTERNAL_UVD,
1225 POWER_STATE_TYPE_INTERNAL_UVD_SD,
1226 POWER_STATE_TYPE_INTERNAL_UVD_HD,
1227 POWER_STATE_TYPE_INTERNAL_UVD_HD2,
1228 POWER_STATE_TYPE_INTERNAL_UVD_MVC,
1229 POWER_STATE_TYPE_INTERNAL_BOOT,
1230 POWER_STATE_TYPE_INTERNAL_THERMAL,
1231 POWER_STATE_TYPE_INTERNAL_ACPI,
1232 POWER_STATE_TYPE_INTERNAL_ULV,
1233 POWER_STATE_TYPE_INTERNAL_3DPERF,
1234};
1235
1236enum radeon_pm_profile_type {
1237 PM_PROFILE_DEFAULT,
1238 PM_PROFILE_AUTO,
1239 PM_PROFILE_LOW,
1240 PM_PROFILE_MID,
1241 PM_PROFILE_HIGH,
1242};
1243
1244#define PM_PROFILE_DEFAULT_IDX 0
1245#define PM_PROFILE_LOW_SH_IDX 1
1246#define PM_PROFILE_MID_SH_IDX 2
1247#define PM_PROFILE_HIGH_SH_IDX 3
1248#define PM_PROFILE_LOW_MH_IDX 4
1249#define PM_PROFILE_MID_MH_IDX 5
1250#define PM_PROFILE_HIGH_MH_IDX 6
1251#define PM_PROFILE_MAX 7
1252
1253struct radeon_pm_profile {
1254 int dpms_off_ps_idx;
1255 int dpms_on_ps_idx;
1256 int dpms_off_cm_idx;
1257 int dpms_on_cm_idx;
1258};
1259
1260enum radeon_int_thermal_type {
1261 THERMAL_TYPE_NONE,
1262 THERMAL_TYPE_EXTERNAL,
1263 THERMAL_TYPE_EXTERNAL_GPIO,
1264 THERMAL_TYPE_RV6XX,
1265 THERMAL_TYPE_RV770,
1266 THERMAL_TYPE_ADT7473_WITH_INTERNAL,
1267 THERMAL_TYPE_EVERGREEN,
1268 THERMAL_TYPE_SUMO,
1269 THERMAL_TYPE_NI,
1270 THERMAL_TYPE_SI,
1271 THERMAL_TYPE_EMC2103_WITH_INTERNAL,
1272 THERMAL_TYPE_CI,
1273 THERMAL_TYPE_KV,
1274};
1275
1276struct radeon_voltage {
1277 enum radeon_voltage_type type;
1278 /* gpio voltage */
1279 struct radeon_gpio_rec gpio;
1280 u32 delay; /* delay in usec from voltage drop to sclk change */
1281 bool active_high; /* voltage drop is active when bit is high */
1282 /* VDDC voltage */
1283 u8 vddc_id; /* index into vddc voltage table */
1284 u8 vddci_id; /* index into vddci voltage table */
1285 bool vddci_enabled;
1286 /* r6xx+ sw */
1287 u16 voltage;
1288 /* evergreen+ vddci */
1289 u16 vddci;
1290};
1291
1292/* clock mode flags */
1293#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)
1294
1295struct radeon_pm_clock_info {
1296 /* memory clock */
1297 u32 mclk;
1298 /* engine clock */
1299 u32 sclk;
1300 /* voltage info */
1301 struct radeon_voltage voltage;
1302 /* standardized clock flags */
1303 u32 flags;
1304};
1305
1306/* state flags */
1307#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
1308
1309struct radeon_power_state {
1310 enum radeon_pm_state_type type;
1311 struct radeon_pm_clock_info *clock_info;
1312 /* number of valid clock modes in this power state */
1313 int num_clock_modes;
1314 struct radeon_pm_clock_info *default_clock_mode;
1315 /* standardized state flags */
1316 u32 flags;
1317 u32 misc; /* vbios specific flags */
1318 u32 misc2; /* vbios specific flags */
1319 int pcie_lanes; /* pcie lanes */
1320};
1321
1322/*
1323 * Some modes are overclocked by only a very small margin; accept them.
1324 */
1325#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
1326
1327enum radeon_dpm_auto_throttle_src {
1328 RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL,
1329 RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL
1330};
1331
1332enum radeon_dpm_event_src {
1333 RADEON_DPM_EVENT_SRC_ANALOG = 0,
1334 RADEON_DPM_EVENT_SRC_EXTERNAL = 1,
1335 RADEON_DPM_EVENT_SRC_DIGITAL = 2,
1336 RADEON_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
1337 RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
1338};
1339
1340#define RADEON_MAX_VCE_LEVELS 6
1341
1342enum radeon_vce_level {
1343 RADEON_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
1344 RADEON_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
1345 RADEON_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
1346 RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
1347 RADEON_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
1348 RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
1349};
1350
1351struct radeon_ps {
1352 u32 caps; /* vbios flags */
1353 u32 class; /* vbios flags */
1354 u32 class2; /* vbios flags */
1355 /* UVD clocks */
1356 u32 vclk;
1357 u32 dclk;
1358 /* VCE clocks */
1359 u32 evclk;
1360 u32 ecclk;
1361 bool vce_active;
1362 enum radeon_vce_level vce_level;
1363 /* asic priv */
1364 void *ps_priv;
1365};
1366
1367struct radeon_dpm_thermal {
1368 /* thermal interrupt work */
1369 struct work_struct work;
1370 /* low temperature threshold */
1371 int min_temp;
1372 /* high temperature threshold */
1373 int max_temp;
1374 /* was interrupt low to high or high to low */
1375 bool high_to_low;
1376};
1377
1378enum radeon_clk_action
1379{
1380 RADEON_SCLK_UP = 1,
1381 RADEON_SCLK_DOWN
1382};
1383
1384struct radeon_blacklist_clocks
1385{
1386 u32 sclk;
1387 u32 mclk;
1388 enum radeon_clk_action action;
1389};
1390
1391struct radeon_clock_and_voltage_limits {
1392 u32 sclk;
1393 u32 mclk;
1394 u16 vddc;
1395 u16 vddci;
1396};
1397
1398struct radeon_clock_array {
1399 u32 count;
1400 u32 *values;
1401};
1402
1403struct radeon_clock_voltage_dependency_entry {
1404 u32 clk;
1405 u16 v;
1406};
1407
1408struct radeon_clock_voltage_dependency_table {
1409 u32 count;
1410 struct radeon_clock_voltage_dependency_entry *entries;
1411};
1412
1413union radeon_cac_leakage_entry {
1414 struct {
1415 u16 vddc;
1416 u32 leakage;
1417 };
1418 struct {
1419 u16 vddc1;
1420 u16 vddc2;
1421 u16 vddc3;
1422 };
1423};
1424
1425struct radeon_cac_leakage_table {
1426 u32 count;
1427 union radeon_cac_leakage_entry *entries;
1428};
1429
1430struct radeon_phase_shedding_limits_entry {
1431 u16 voltage;
1432 u32 sclk;
1433 u32 mclk;
1434};
1435
1436struct radeon_phase_shedding_limits_table {
1437 u32 count;
1438 struct radeon_phase_shedding_limits_entry *entries;
1439};
1440
1441struct radeon_uvd_clock_voltage_dependency_entry {
1442 u32 vclk;
1443 u32 dclk;
1444 u16 v;
1445};
1446
1447struct radeon_uvd_clock_voltage_dependency_table {
1448 u8 count;
1449 struct radeon_uvd_clock_voltage_dependency_entry *entries;
1450};
1451
1452struct radeon_vce_clock_voltage_dependency_entry {
1453 u32 ecclk;
1454 u32 evclk;
1455 u16 v;
1456};
1457
1458struct radeon_vce_clock_voltage_dependency_table {
1459 u8 count;
1460 struct radeon_vce_clock_voltage_dependency_entry *entries;
1461};
1462
1463struct radeon_ppm_table {
1464 u8 ppm_design;
1465 u16 cpu_core_number;
1466 u32 platform_tdp;
1467 u32 small_ac_platform_tdp;
1468 u32 platform_tdc;
1469 u32 small_ac_platform_tdc;
1470 u32 apu_tdp;
1471 u32 dgpu_tdp;
1472 u32 dgpu_ulv_power;
1473 u32 tj_max;
1474};
1475
1476struct radeon_cac_tdp_table {
1477 u16 tdp;
1478 u16 configurable_tdp;
1479 u16 tdc;
1480 u16 battery_power_limit;
1481 u16 small_power_limit;
1482 u16 low_cac_leakage;
1483 u16 high_cac_leakage;
1484 u16 maximum_power_delivery_limit;
1485};
1486
1487struct radeon_dpm_dynamic_state {
1488 struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
1489 struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
1490 struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
1491 struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
1492 struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
1493 struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
1494 struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
1495 struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
1496 struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
1497 struct radeon_clock_array valid_sclk_values;
1498 struct radeon_clock_array valid_mclk_values;
1499 struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
1500 struct radeon_clock_and_voltage_limits max_clock_voltage_on_ac;
1501 u32 mclk_sclk_ratio;
1502 u32 sclk_mclk_delta;
1503 u16 vddc_vddci_delta;
1504 u16 min_vddc_for_pcie_gen2;
1505 struct radeon_cac_leakage_table cac_leakage_table;
1506 struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
1507 struct radeon_ppm_table *ppm_table;
1508 struct radeon_cac_tdp_table *cac_tdp_table;
1509};
1510
1511struct radeon_dpm_fan {
1512 u16 t_min;
1513 u16 t_med;
1514 u16 t_high;
1515 u16 pwm_min;
1516 u16 pwm_med;
1517 u16 pwm_high;
1518 u8 t_hyst;
1519 u32 cycle_delay;
1520 u16 t_max;
1521 u8 control_mode;
1522 u16 default_max_fan_pwm;
1523 u16 default_fan_output_sensitivity;
1524 u16 fan_output_sensitivity;
1525 bool ucode_fan_control;
1526};
1527
1528enum radeon_pcie_gen {
1529 RADEON_PCIE_GEN1 = 0,
1530 RADEON_PCIE_GEN2 = 1,
1531 RADEON_PCIE_GEN3 = 2,
1532 RADEON_PCIE_GEN_INVALID = 0xffff
1533};
1534
1535enum radeon_dpm_forced_level {
1536 RADEON_DPM_FORCED_LEVEL_AUTO = 0,
1537 RADEON_DPM_FORCED_LEVEL_LOW = 1,
1538 RADEON_DPM_FORCED_LEVEL_HIGH = 2,
1539};
1540
1541struct radeon_vce_state {
1542 /* vce clocks */
1543 u32 evclk;
1544 u32 ecclk;
1545 /* gpu clocks */
1546 u32 sclk;
1547 u32 mclk;
1548 u8 clk_idx;
1549 u8 pstate;
1550};
1551
1552struct radeon_dpm {
1553 struct radeon_ps *ps;
1554 /* number of valid power states */
1555 int num_ps;
1556 /* current power state that is active */
1557 struct radeon_ps *current_ps;
1558 /* requested power state */
1559 struct radeon_ps *requested_ps;
1560 /* boot up power state */
1561 struct radeon_ps *boot_ps;
1562 /* default uvd power state */
1563 struct radeon_ps *uvd_ps;
1564 /* vce requirements */
1565 struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS];
1566 enum radeon_vce_level vce_level;
1567 enum radeon_pm_state_type state;
1568 enum radeon_pm_state_type user_state;
1569 u32 platform_caps;
1570 u32 voltage_response_time;
1571 u32 backbias_response_time;
1572 void *priv;
1573 u32 new_active_crtcs;
1574 int new_active_crtc_count;
1575 int high_pixelclock_count;
1576 u32 current_active_crtcs;
1577 int current_active_crtc_count;
1578 bool single_display;
1579 struct radeon_dpm_dynamic_state dyn_state;
1580 struct radeon_dpm_fan fan;
1581 u32 tdp_limit;
1582 u32 near_tdp_limit;
1583 u32 near_tdp_limit_adjusted;
1584 u32 sq_ramping_threshold;
1585 u32 cac_leakage;
1586 u16 tdp_od_limit;
1587 u32 tdp_adjustment;
1588 u16 load_line_slope;
1589 bool power_control;
1590 bool ac_power;
1591 /* special states active */
1592 bool thermal_active;
1593 bool uvd_active;
1594 bool vce_active;
1595 /* thermal handling */
1596 struct radeon_dpm_thermal thermal;
1597 /* forced levels */
1598 enum radeon_dpm_forced_level forced_level;
1599 /* track UVD streams */
1600 unsigned sd;
1601 unsigned hd;
1602};
1603
1604void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
1605void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable);
1606
1607struct radeon_pm {
1608 struct rwlock mutex;
1609 /* write locked while reprogramming mclk */
1610 struct rwlock mclk_lock;
1611 u32 active_crtcs;
1612 int active_crtc_count;
1613 int req_vblank;
1614 bool vblank_sync;
1615 fixed20_12 max_bandwidth;
1616 fixed20_12 igp_sideport_mclk;
1617 fixed20_12 igp_system_mclk;
1618 fixed20_12 igp_ht_link_clk;
1619 fixed20_12 igp_ht_link_width;
1620 fixed20_12 k8_bandwidth;
1621 fixed20_12 sideport_bandwidth;
1622 fixed20_12 ht_bandwidth;
1623 fixed20_12 core_bandwidth;
1624 fixed20_12 sclk;
1625 fixed20_12 mclk;
1626 fixed20_12 needed_bandwidth;
1627 struct radeon_power_state *power_state;
1628 /* number of valid power states */
1629 int num_power_states;
1630 int current_power_state_index;
1631 int current_clock_mode_index;
1632 int requested_power_state_index;
1633 int requested_clock_mode_index;
1634 int default_power_state_index;
1635 u32 current_sclk;
1636 u32 current_mclk;
1637 u16 current_vddc;
1638 u16 current_vddci;
1639 u32 default_sclk;
1640 u32 default_mclk;
1641 u16 default_vddc;
1642 u16 default_vddci;
1643 struct radeon_i2c_chan *i2c_bus;
1644 /* selected pm method */
1645 enum radeon_pm_method pm_method;
1646 /* dynpm power management */
1647 struct delayed_work dynpm_idle_work;
1648 enum radeon_dynpm_state dynpm_state;
1649 enum radeon_dynpm_action dynpm_planned_action;
1650 unsigned long dynpm_action_timeout;
1651 bool dynpm_can_upclock;
1652 bool dynpm_can_downclock;
1653 /* profile-based power management */
1654 enum radeon_pm_profile_type profile;
1655 int profile_index;
1656 struct radeon_pm_profile profiles[PM_PROFILE_MAX];
1657 /* internal thermal controller on rv6xx+ */
1658 enum radeon_int_thermal_type int_thermal_type;
1659 struct device *int_hwmon_dev;
1660 /* fan control parameters */
1661 bool no_fan;
1662 u8 fan_pulses_per_revolution;
1663 u8 fan_min_rpm;
1664 u8 fan_max_rpm;
1665 /* dpm */
1666 bool dpm_enabled;
1667 bool sysfs_initialized;
1668 struct radeon_dpm dpm;
1669};
1670
1671#define RADEON_PCIE_SPEED_25 1
1672#define RADEON_PCIE_SPEED_50 2
1673#define RADEON_PCIE_SPEED_80 4
1674
1675int radeon_pm_get_type_index(struct radeon_device *rdev,
1676 enum radeon_pm_state_type ps_type,
1677 int instance);
1678/*
1679 * UVD
1680 */
1681#define RADEON_DEFAULT_UVD_HANDLES 10
1682#define RADEON_MAX_UVD_HANDLES 30
1683#define RADEON_UVD_STACK_SIZE (200*1024)
1684#define RADEON_UVD_HEAP_SIZE (256*1024)
1685#define RADEON_UVD_SESSION_SIZE (50*1024)
1686
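Taken together these constants bound the UVD VCPU working set; with the default handle count the arithmetic is as follows (illustration only; how the driver combines them when sizing vcpu_bo is not shown here):

 /* 10 default handles * 50K of session state = 512000 bytes, plus one
  * 200K stack area and one 256K heap area = 466944 bytes. */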
1687struct radeon_uvd {
1688 bool fw_header_present;
1689 struct radeon_bo *vcpu_bo;
1690 void *cpu_addr;
1691 uint64_t gpu_addr;
1692 unsigned max_handles;
1693 atomic_t handles[RADEON_MAX_UVD_HANDLES];
1694 struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
1695 unsigned img_size[RADEON_MAX_UVD_HANDLES];
1696 struct delayed_work idle_work;
1697};
1698
1699int radeon_uvd_init(struct radeon_device *rdev);
1700void radeon_uvd_fini(struct radeon_device *rdev);
1701int radeon_uvd_suspend(struct radeon_device *rdev);
1702int radeon_uvd_resume(struct radeon_device *rdev);
1703int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
1704 uint32_t handle, struct radeon_fence **fence);
1705int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
1706 uint32_t handle, struct radeon_fence **fence);
1707void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
1708 uint32_t allowed_domains);
1709void radeon_uvd_free_handles(struct radeon_device *rdev,
1710 struct drm_file *filp);
1711int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
1712void radeon_uvd_note_usage(struct radeon_device *rdev);
1713int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
1714 unsigned vclk, unsigned dclk,
1715 unsigned vco_min, unsigned vco_max,
1716 unsigned fb_factor, unsigned fb_mask,
1717 unsigned pd_min, unsigned pd_max,
1718 unsigned pd_even,
1719 unsigned *optimal_fb_div,
1720 unsigned *optimal_vclk_div,
1721 unsigned *optimal_dclk_div);
1722int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
1723 unsigned cg_upll_func_cntl);
1724
1725/*
1726 * VCE
1727 */
1728#define RADEON_MAX_VCE_HANDLES 16
1729
1730struct radeon_vce {
1731 struct radeon_bo *vcpu_bo;
1732 uint64_t gpu_addr;
1733 unsigned fw_version;
1734 unsigned fb_version;
1735 atomic_t handles[RADEON_MAX_VCE_HANDLES];
1736 struct drm_file *filp[RADEON_MAX_VCE_HANDLES];
1737 unsigned img_size[RADEON_MAX_VCE_HANDLES];
1738 struct delayed_work idle_work;
1739 uint32_t keyselect;
1740};
1741
1742int radeon_vce_init(struct radeon_device *rdev);
1743void radeon_vce_fini(struct radeon_device *rdev);
1744int radeon_vce_suspend(struct radeon_device *rdev);
1745int radeon_vce_resume(struct radeon_device *rdev);
1746int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
1747 uint32_t handle, struct radeon_fence **fence);
1748int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
1749 uint32_t handle, struct radeon_fence **fence);
1750void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
1751void radeon_vce_note_usage(struct radeon_device *rdev);
1752int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
1753int radeon_vce_cs_parse(struct radeon_cs_parser *p);
1754bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
1755 struct radeon_ring *ring,
1756 struct radeon_semaphore *semaphore,
1757 bool emit_wait);
1758void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
1759void radeon_vce_fence_emit(struct radeon_device *rdev,
1760 struct radeon_fence *fence);
1761int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
1762int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
1763
1764struct r600_audio_pin {
1765 int channels;
1766 int rate;
1767 int bits_per_sample;
1768 u8 status_bits;
1769 u8 category_code;
1770 u32 offset;
1771 bool connected;
1772 u32 id;
1773};
1774
1775struct r600_audio {
1776 bool enabled;
1777 struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
1778 int num_pins;
1779 struct radeon_audio_funcs *hdmi_funcs;
1780 struct radeon_audio_funcs *dp_funcs;
1781 struct radeon_audio_basic_funcs *funcs;
1782};
1783
1784/*
1785 * Benchmarking
1786 */
1787void radeon_benchmark(struct radeon_device *rdev, int test_number);
1788
1789
1790/*
1791 * Testing
1792 */
1793void radeon_test_moves(struct radeon_device *rdev);
1794void radeon_test_ring_sync(struct radeon_device *rdev,
1795 struct radeon_ring *cpA,
1796 struct radeon_ring *cpB);
1797void radeon_test_syncing(struct radeon_device *rdev);
1798
1799/*
1800 * MMU Notifier
1801 */
1802#if defined(CONFIG_MMU_NOTIFIER)
1803int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
1804void radeon_mn_unregister(struct radeon_bo *bo);
1805#else
1806static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
1807{
1808 return -ENODEV;
1809}
1810static inline void radeon_mn_unregister(struct radeon_bo *bo) {}
1811#endif
1812
1813/*
1814 * Debugfs
1815 */
1816struct radeon_debugfs {
1817 struct drm_info_list *files;
1818 unsigned num_files;
1819};
1820
1821int radeon_debugfs_add_files(struct radeon_device *rdev,
1822 struct drm_info_list *files,
1823 unsigned nfiles);
1824int radeon_debugfs_fence_init(struct radeon_device *rdev);
1825
1826/*
1827 * ASIC ring specific functions.
1828 */
1829struct radeon_asic_ring {
1830 /* ring read/write ptr handling */
1831 u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1832 u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1833 void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
1834
1835 /* validating and patching of IBs */
1836 int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
1837 int (*cs_parse)(struct radeon_cs_parser *p);
1838
1839 /* command emit functions */
1840 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
1841 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
1842 void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
1843 bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
1844 struct radeon_semaphore *semaphore, bool emit_wait);
1845 void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
1846 unsigned vm_id, uint64_t pd_addr);
1847
1848 /* testing functions */
1849 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1850 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1851 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1852
1853 /* deprecated */
1854 void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
1855};
1856
1857/*
1858 * ASIC specific functions.
1859 */
1860struct radeon_asic {
1861 int (*init)(struct radeon_device *rdev);
1862 void (*fini)(struct radeon_device *rdev);
1863 int (*resume)(struct radeon_device *rdev);
1864 int (*suspend)(struct radeon_device *rdev);
1865 void (*vga_set_state)(struct radeon_device *rdev, bool state);
1866 int (*asic_reset)(struct radeon_device *rdev, bool hard);
1867 /* Flush the HDP cache via MMIO */
1868 void (*mmio_hdp_flush)(struct radeon_device *rdev);
1869 /* check if 3D engine is idle */
1870 bool (*gui_idle)(struct radeon_device *rdev);
1871 /* wait for mc_idle */
1872 int (*mc_wait_for_idle)(struct radeon_device *rdev);
1873 /* get the reference clock */
1874 u32 (*get_xclk)(struct radeon_device *rdev);
1875 /* get the gpu clock counter */
1876 uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
1877 /* get register for info ioctl */
1878 int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val);
1879 /* gart */
1880 struct {
1881 void (*tlb_flush)(struct radeon_device *rdev);
1882 uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
1883 void (*set_page)(struct radeon_device *rdev, unsigned i,
1884 uint64_t entry);
1885 } gart;
1886 struct {
1887 int (*init)(struct radeon_device *rdev);
1888 void (*fini)(struct radeon_device *rdev);
1889 void (*copy_pages)(struct radeon_device *rdev,
1890 struct radeon_ib *ib,
1891 uint64_t pe, uint64_t src,
1892 unsigned count);
1893 void (*write_pages)(struct radeon_device *rdev,
1894 struct radeon_ib *ib,
1895 uint64_t pe,
1896 uint64_t addr, unsigned count,
1897 uint32_t incr, uint32_t flags);
1898 void (*set_pages)(struct radeon_device *rdev,
1899 struct radeon_ib *ib,
1900 uint64_t pe,
1901 uint64_t addr, unsigned count,
1902 uint32_t incr, uint32_t flags);
1903 void (*pad_ib)(struct radeon_ib *ib);
1904 } vm;
1905 /* ring specific callbacks */
1906 const struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
1907 /* irqs */
1908 struct {
1909 int (*set)(struct radeon_device *rdev);
1910 int (*process)(struct radeon_device *rdev);
1911 } irq;
1912 /* displays */
1913 struct {
1914 /* display watermarks */
1915 void (*bandwidth_update)(struct radeon_device *rdev);
1916 /* get frame count */
1917 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
1918 /* wait for vblank */
1919 void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
1920 /* set backlight level */
1921 void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
1922 /* get backlight level */
1923 u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
1924 /* audio callbacks */
1925 void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
1926 void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
1927 } display;
1928 /* copy functions for bo handling */
1929 struct {
1930 struct radeon_fence *(*blit)(struct radeon_device *rdev,
1931 uint64_t src_offset,
1932 uint64_t dst_offset,
1933 unsigned num_gpu_pages,
1934 struct dma_resv *resv);
1935 u32 blit_ring_index;
1936 struct radeon_fence *(*dma)(struct radeon_device *rdev,
1937 uint64_t src_offset,
1938 uint64_t dst_offset,
1939 unsigned num_gpu_pages,
1940 struct dma_resv *resv);
1941 u32 dma_ring_index;
1942 /* method used for bo copy */
1943 struct radeon_fence *(*copy)(struct radeon_device *rdev,
1944 uint64_t src_offset,
1945 uint64_t dst_offset,
1946 unsigned num_gpu_pages,
1947 struct dma_resv *resv);
1948 /* ring used for bo copies */
1949 u32 copy_ring_index;
1950 } copy;
1951 /* surfaces */
1952 struct {
1953 int (*set_reg)(struct radeon_device *rdev, int reg,
1954 uint32_t tiling_flags, uint32_t pitch,
1955 uint32_t offset, uint32_t obj_size);
1956 void (*clear_reg)(struct radeon_device *rdev, int reg);
1957 } surface;
1958 /* hotplug detect */
1959 struct {
1960 void (*init)(struct radeon_device *rdev);
1961 void (*fini)(struct radeon_device *rdev);
1962 bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1963 void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1964 } hpd;
1965 /* static power management */
1966 struct {
1967 void (*misc)(struct radeon_device *rdev);
1968 void (*prepare)(struct radeon_device *rdev);
1969 void (*finish)(struct radeon_device *rdev);
1970 void (*init_profile)(struct radeon_device *rdev);
1971 void (*get_dynpm_state)(struct radeon_device *rdev);
1972 uint32_t (*get_engine_clock)(struct radeon_device *rdev);
1973 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
1974 uint32_t (*get_memory_clock)(struct radeon_device *rdev);
1975 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
1976 int (*get_pcie_lanes)(struct radeon_device *rdev);
1977 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
1978 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
1979 int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
1980 int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk);
1981 int (*get_temperature)(struct radeon_device *rdev);
1982 } pm;
1983 /* dynamic power management */
1984 struct {
1985 int (*init)(struct radeon_device *rdev);
1986 void (*setup_asic)(struct radeon_device *rdev);
1987 int (*enable)(struct radeon_device *rdev);
1988 int (*late_enable)(struct radeon_device *rdev);
1989 void (*disable)(struct radeon_device *rdev);
1990 int (*pre_set_power_state)(struct radeon_device *rdev);
1991 int (*set_power_state)(struct radeon_device *rdev);
1992 void (*post_set_power_state)(struct radeon_device *rdev);
1993 void (*display_configuration_changed)(struct radeon_device *rdev);
1994 void (*fini)(struct radeon_device *rdev);
1995 u32 (*get_sclk)(struct radeon_device *rdev, bool low);
1996 u32 (*get_mclk)(struct radeon_device *rdev, bool low);
1997 void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
1998 void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
1999 int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
2000 bool (*vblank_too_short)(struct radeon_device *rdev);
2001 void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
2002 void (*enable_bapm)(struct radeon_device *rdev, bool enable);
2003 void (*fan_ctrl_set_mode)(struct radeon_device *rdev, u32 mode);
2004 u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev);
2005 int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed);
2006 int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed);
2007 u32 (*get_current_sclk)(struct radeon_device *rdev);
2008 u32 (*get_current_mclk)(struct radeon_device *rdev);
2009 } dpm;
2010 /* pageflipping */
2011 struct {
2012 void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base, bool async);
2013 bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
2014 } pflip;
2015};
2016
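Every supported family fills out this same radeon_asic table, so generation-independent code dispatches through rdev->asic rather than switching on the chip; a hedged sketch of the pattern (the driver itself wraps such calls in macros):

 /* e.g. resetting the ASIC through the per-family callback table: */
 int r = rdev->asic->asic_reset(rdev, hard);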
2017/*
2018 * Asic structures
2019 */
2020struct r100_asic {
2021 const unsigned *reg_safe_bm;
2022 unsigned reg_safe_bm_size;
2023 u32 hdp_cntl;
2024};
2025
2026struct r300_asic {
2027 const unsigned *reg_safe_bm;
2028 unsigned reg_safe_bm_size;
2029 u32 resync_scratch;
2030 u32 hdp_cntl;
2031};
2032
2033struct r600_asic {
2034 unsigned max_pipes;
2035 unsigned max_tile_pipes;
2036 unsigned max_simds;
2037 unsigned max_backends;
2038 unsigned max_gprs;
2039 unsigned max_threads;
2040 unsigned max_stack_entries;
2041 unsigned max_hw_contexts;
2042 unsigned max_gs_threads;
2043 unsigned sx_max_export_size;
2044 unsigned sx_max_export_pos_size;
2045 unsigned sx_max_export_smx_size;
2046 unsigned sq_num_cf_insts;
2047 unsigned tiling_nbanks;
2048 unsigned tiling_npipes;
2049 unsigned tiling_group_size;
2050 unsigned tile_config;
2051 unsigned backend_map;
2052 unsigned active_simds;
2053};
2054
2055struct rv770_asic {
2056 unsigned max_pipes;
2057 unsigned max_tile_pipes;
2058 unsigned max_simds;
2059 unsigned max_backends;
2060 unsigned max_gprs;
2061 unsigned max_threads;
2062 unsigned max_stack_entries;
2063 unsigned max_hw_contexts;
2064 unsigned max_gs_threads;
2065 unsigned sx_max_export_size;
2066 unsigned sx_max_export_pos_size;
2067 unsigned sx_max_export_smx_size;
2068 unsigned sq_num_cf_insts;
2069 unsigned sx_num_of_sets;
2070 unsigned sc_prim_fifo_size;
2071 unsigned sc_hiz_tile_fifo_size;
2072 unsigned sc_earlyz_tile_fifo_fize;
2073 unsigned tiling_nbanks;
2074 unsigned tiling_npipes;
2075 unsigned tiling_group_size;
2076 unsigned tile_config;
2077 unsigned backend_map;
2078 unsigned active_simds;
2079};
2080
2081struct evergreen_asic {
2082 unsigned num_ses;
2083 unsigned max_pipes;
2084 unsigned max_tile_pipes;
2085 unsigned max_simds;
2086 unsigned max_backends;
2087 unsigned max_gprs;
2088 unsigned max_threads;
2089 unsigned max_stack_entries;
2090 unsigned max_hw_contexts;
2091 unsigned max_gs_threads;
2092 unsigned sx_max_export_size;
2093 unsigned sx_max_export_pos_size;
2094 unsigned sx_max_export_smx_size;
2095 unsigned sq_num_cf_insts;
2096 unsigned sx_num_of_sets;
2097 unsigned sc_prim_fifo_size;
2098 unsigned sc_hiz_tile_fifo_size;
2099 unsigned sc_earlyz_tile_fifo_size;
2100 unsigned tiling_nbanks;
2101 unsigned tiling_npipes;
2102 unsigned tiling_group_size;
2103 unsigned tile_config;
2104 unsigned backend_map;
2105 unsigned active_simds;
2106};
2107
2108struct cayman_asic {
2109 unsigned max_shader_engines;
2110 unsigned max_pipes_per_simd;
2111 unsigned max_tile_pipes;
2112 unsigned max_simds_per_se;
2113 unsigned max_backends_per_se;
2114 unsigned max_texture_channel_caches;
2115 unsigned max_gprs;
2116 unsigned max_threads;
2117 unsigned max_gs_threads;
2118 unsigned max_stack_entries;
2119 unsigned sx_num_of_sets;
2120 unsigned sx_max_export_size;
2121 unsigned sx_max_export_pos_size;
2122 unsigned sx_max_export_smx_size;
2123 unsigned max_hw_contexts;
2124 unsigned sq_num_cf_insts;
2125 unsigned sc_prim_fifo_size;
2126 unsigned sc_hiz_tile_fifo_size;
2127 unsigned sc_earlyz_tile_fifo_size;
2128
2129 unsigned num_shader_engines;
2130 unsigned num_shader_pipes_per_simd;
2131 unsigned num_tile_pipes;
2132 unsigned num_simds_per_se;
2133 unsigned num_backends_per_se;
2134 unsigned backend_disable_mask_per_asic;
2135 unsigned backend_map;
2136 unsigned num_texture_channel_caches;
2137 unsigned mem_max_burst_length_bytes;
2138 unsigned mem_row_size_in_kb;
2139 unsigned shader_engine_tile_size;
2140 unsigned num_gpus;
2141 unsigned multi_gpu_tile_size;
2142
2143 unsigned tile_config;
2144 unsigned active_simds;
2145};
2146
2147struct si_asic {
2148 unsigned max_shader_engines;
2149 unsigned max_tile_pipes;
2150 unsigned max_cu_per_sh;
2151 unsigned max_sh_per_se;
2152 unsigned max_backends_per_se;
2153 unsigned max_texture_channel_caches;
2154 unsigned max_gprs;
2155 unsigned max_gs_threads;
2156 unsigned max_hw_contexts;
2157 unsigned sc_prim_fifo_size_frontend;
2158 unsigned sc_prim_fifo_size_backend;
2159 unsigned sc_hiz_tile_fifo_size;
2160 unsigned sc_earlyz_tile_fifo_size;
2161
2162 unsigned num_tile_pipes;
2163 unsigned backend_enable_mask;
2164 unsigned backend_disable_mask_per_asic;
2165 unsigned backend_map;
2166 unsigned num_texture_channel_caches;
2167 unsigned mem_max_burst_length_bytes;
2168 unsigned mem_row_size_in_kb;
2169 unsigned shader_engine_tile_size;
2170 unsigned num_gpus;
2171 unsigned multi_gpu_tile_size;
2172
2173 unsigned tile_config;
2174 uint32_t tile_mode_array[32];
2175 uint32_t active_cus;
2176};
2177
2178struct cik_asic {
2179 unsigned max_shader_engines;
2180 unsigned max_tile_pipes;
2181 unsigned max_cu_per_sh;
2182 unsigned max_sh_per_se;
2183 unsigned max_backends_per_se;
2184 unsigned max_texture_channel_caches;
2185 unsigned max_gprs;
2186 unsigned max_gs_threads;
2187 unsigned max_hw_contexts;
2188 unsigned sc_prim_fifo_size_frontend;
2189 unsigned sc_prim_fifo_size_backend;
2190 unsigned sc_hiz_tile_fifo_size;
2191 unsigned sc_earlyz_tile_fifo_size;
2192
2193 unsigned num_tile_pipes;
2194 unsigned backend_enable_mask;
2195 unsigned backend_disable_mask_per_asic;
2196 unsigned backend_map;
2197 unsigned num_texture_channel_caches;
2198 unsigned mem_max_burst_length_bytes;
2199 unsigned mem_row_size_in_kb;
2200 unsigned shader_engine_tile_size;
2201 unsigned num_gpus;
2202 unsigned multi_gpu_tile_size;
2203
2204 unsigned tile_config;
2205 uint32_t tile_mode_array[32];
2206 uint32_t macrotile_mode_array[16];
2207 uint32_t active_cus;
2208};
2209
2210union radeon_asic_config {
2211 struct r300_asic r300;
2212 struct r100_asic r100;
2213 struct r600_asic r600;
2214 struct rv770_asic rv770;
2215 struct evergreen_asic evergreen;
2216 struct cayman_asic cayman;
2217 struct si_asic si;
2218 struct cik_asic cik;
2219};
2220
2221/*
2222 * asic initialization from radeon_asic.c
2223 */
2224void radeon_agp_disable(struct radeon_device *rdev);
2225int radeon_asic_init(struct radeon_device *rdev);
2226
2227
2228/*
2229 * IOCTL.
2230 */
2231int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
2232 struct drm_file *filp);
2233int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
2234 struct drm_file *filp);
2235int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
2236 struct drm_file *filp);
2237int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
2238 struct drm_file *file_priv);
2239int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
2240 struct drm_file *file_priv);
2241int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
2242 struct drm_file *file_priv);
2243int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
2244 struct drm_file *file_priv);
2245int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
2246 struct drm_file *filp);
2247int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
2248 struct drm_file *filp);
2249int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
2250 struct drm_file *filp);
2251int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
2252 struct drm_file *filp);
2253int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
2254 struct drm_file *filp);
2255int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
2256 struct drm_file *filp);
2257int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
2258int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
2259 struct drm_file *filp);
2260int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
2261 struct drm_file *filp);
2262
2263/* VRAM scratch page for HDP bug, default vram page */
2264struct r600_vram_scratch {
2265 struct radeon_bo *robj;
2266 volatile uint32_t *ptr;
2267 u64 gpu_addr;
2268};
2269
2270/*
2271 * ACPI
2272 */
2273struct radeon_atif_notification_cfg {
2274 bool enabled;
2275 int command_code;
2276};
2277
2278struct radeon_atif_notifications {
2279 bool display_switch;
2280 bool expansion_mode_change;
2281 bool thermal_state;
2282 bool forced_power_state;
2283 bool system_power_state;
2284 bool display_conf_change;
2285 bool px_gfx_switch;
2286 bool brightness_change;
2287 bool dgpu_display_event;
2288};
2289
2290struct radeon_atif_functions {
2291 bool system_params;
2292 bool sbios_requests;
2293 bool select_active_disp;
2294 bool lid_state;
2295 bool get_tv_standard;
2296 bool set_tv_standard;
2297 bool get_panel_expansion_mode;
2298 bool set_panel_expansion_mode;
2299 bool temperature_change;
2300 bool graphics_device_types;
2301};
2302
2303struct radeon_atif {
2304 struct radeon_atif_notifications notifications;
2305 struct radeon_atif_functions functions;
2306 struct radeon_atif_notification_cfg notification_cfg;
2307 struct radeon_encoder *encoder_for_bl;
2308};
2309
2310struct radeon_atcs_functions {
2311 bool get_ext_state;
2312 bool pcie_perf_req;
2313 bool pcie_dev_rdy;
2314 bool pcie_bus_width;
2315};
2316
2317struct radeon_atcs {
2318 struct radeon_atcs_functions functions;
2319};
2320
2321/*
2322 * Core structure, functions and helpers.
2323 */
2324typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
2325typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
2326
2327struct radeon_device {
2328 struct device self;
2329 struct device *dev;
2330 struct drm_device *ddev;
2331 struct pci_dev *pdev;
2332 struct rwlock exclusive_lock;
2333
2334 pci_chipset_tag_t pc;
2335 pcitag_t pa_tag;
2336 pci_intr_handle_t intrh;
2337 bus_space_tag_t iot;
2338 bus_space_tag_t memt;
2339 bus_dma_tag_t dmat;
2340 void *irqh;
2341
2342 void (*switchcb)(void *, int, int);
2343 void *switchcbarg;
2344 void *switchcookie;
2345 struct task switchtask;
2346 struct rasops_info ro;
2347 int console;
2348 int primary;
2349
2350 struct task burner_task;
2351 int burner_fblank;
2352
2353#ifdef __sparc64__
2354 struct sunfb sf;
2355 bus_size_t fb_offset;
2356 bus_space_handle_t memh;
2357#endif
2358
2359 unsigned long fb_aper_offset;
2360 unsigned long fb_aper_size;
2361
2362 /* ASIC */
2363 union radeon_asic_config config;
2364 enum radeon_family family;
2365 unsigned long flags;
2366 int usec_timeout;
2367 enum radeon_pll_errata pll_errata;
2368 int num_gb_pipes;
2369 int num_z_pipes;
2370 int disp_priority;
2371 /* BIOS */
2372 uint8_t *bios;
2373 bool is_atom_bios;
2374 uint16_t bios_header_start;
2375 struct radeon_bo *stolen_vga_memory;
2376 /* Register mmio */
2377 resource_size_t rmmio_base;
2378 resource_size_t rmmio_size;
2379 /* protects concurrent MM_INDEX/DATA based register access */
2380 spinlock_t mmio_idx_lock;
2381 /* protects concurrent SMC based register access */
2382 spinlock_t smc_idx_lock;
2383 /* protects concurrent PLL register access */
2384 spinlock_t pll_idx_lock;
2385 /* protects concurrent MC register access */
2386 spinlock_t mc_idx_lock;
2387 /* protects concurrent PCIE register access */
2388 spinlock_t pcie_idx_lock;
2389 /* protects concurrent PCIE_PORT register access */
2390 spinlock_t pciep_idx_lock;
2391 /* protects concurrent PIF register access */
2392 spinlock_t pif_idx_lock;
2393 /* protects concurrent CG register access */
2394 spinlock_t cg_idx_lock;
2395 /* protects concurrent UVD register access */
2396 spinlock_t uvd_idx_lock;
2397 /* protects concurrent RCU register access */
2398 spinlock_t rcu_idx_lock;
2399 /* protects concurrent DIDT register access */
2400 spinlock_t didt_idx_lock;
2401 /* protects concurrent ENDPOINT (audio) register access */
2402 spinlock_t end_idx_lock;
2403 bus_space_handle_t rmmio_bsh;
2404 void __iomem *rmmio;
2405 radeon_rreg_t mc_rreg;
2406 radeon_wreg_t mc_wreg;
2407 radeon_rreg_t pll_rreg;
2408 radeon_wreg_t pll_wreg;
2409 uint32_t pcie_reg_mask;
2410 radeon_rreg_t pciep_rreg;
2411 radeon_wreg_t pciep_wreg;
2412 /* io port */
2413 bus_space_handle_t rio_mem;
2414 resource_size_t rio_mem_size;
2415 struct radeon_clock clock;
2416 struct radeon_mc mc;
2417 struct radeon_gart gart;
2418 struct radeon_mode_info mode_info;
2419 struct radeon_scratch scratch;
2420 struct radeon_doorbell doorbell;
2421 struct radeon_mman mman;
2422 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
2423 wait_queue_head_t fence_queue;
2424 u64 fence_context;
2425 struct rwlock ring_lock;
2426 struct radeon_ring ring[RADEON_NUM_RINGS];
2427 bool ib_pool_ready;
2428 struct radeon_sa_manager ring_tmp_bo;
2429 struct radeon_irq irq;
2430 struct radeon_asic *asic;
2431 struct radeon_gem gem;
2432 struct radeon_pm pm;
2433 struct radeon_uvd uvd;
2434 struct radeon_vce vce;
2435 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
2436 struct radeon_wb wb;
2437 struct radeon_dummy_page dummy_page;
2438 bool shutdown;
2439 bool need_swiotlb;
2440 bool accel_working;
2441 bool fastfb_working; /* IGP feature */
2442 bool needs_reset, in_reset;
2443 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
2444 const struct firmware *me_fw; /* all family ME firmware */
2445 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
2446 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
2447 const struct firmware *mc_fw; /* NI MC firmware */
2448 const struct firmware *ce_fw; /* SI CE firmware */
2449 const struct firmware *mec_fw; /* CIK MEC firmware */
2450 const struct firmware *mec2_fw; /* KV MEC2 firmware */
2451 const struct firmware *sdma_fw; /* CIK SDMA firmware */
2452 const struct firmware *smc_fw; /* SMC firmware */
2453 const struct firmware *uvd_fw; /* UVD firmware */
2454 const struct firmware *vce_fw; /* VCE firmware */
2455 bool new_fw;
2456 struct r600_vram_scratch vram_scratch;
2457 int msi_enabled; /* msi enabled */
2458 struct r600_ih ih; /* r6/700 interrupt ring */
2459 struct radeon_rlc rlc;
2460 struct radeon_mec mec;
2461 struct delayed_work hotplug_work;
2462 struct work_struct dp_work;
2463 struct work_struct audio_work;
2464 int num_crtc; /* number of crtcs */
2465 struct rwlock dc_hw_i2c_mutex; /* display controller hw i2c mutex */
2466 bool has_uvd;
2467 bool has_vce;
2468 struct r600_audio audio; /* audio stuff */
2469 struct notifier_block acpi_nb;
2470 /* only one userspace can use Hyperz features or CMASK at a time */
2471 struct drm_file *hyperz_filp;
2472 struct drm_file *cmask_filp;
2473 /* i2c buses */
2474 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
2475 /* debugfs */
2476 struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
2477 unsigned debugfs_count;
2478 /* virtual memory */
2479 struct radeon_vm_manager vm_manager;
2480 struct rwlock gpu_clock_mutex;
2481 /* memory stats */
2482 atomic64_t vram_usage;
2483 atomic64_t gtt_usage;
2484 atomic64_t num_bytes_moved;
2485 atomic_t gpu_reset_counter;
2486 /* ACPI interface */
2487 struct radeon_atif atif;
2488 struct radeon_atcs atcs;
2489 /* srbm instance registers */
2490 struct rwlock srbm_mutex;
2491 /* clock, powergating flags */
2492 u32 cg_flags;
2493 u32 pg_flags;
2494
2495 struct dev_pm_domain vga_pm_domain;
2496 bool have_disp_power_ref;
2497 u32 px_quirk_flags;
2498
2499 /* tracking pinned memory */
2500 u64 vram_pin_size;
2501 u64 gart_pin_size;
2502};
2503
2504bool radeon_is_px(struct drm_device *dev);
2505int radeon_device_init(struct radeon_device *rdev,
2506 struct drm_device *ddev,
2507 struct pci_dev *pdev,
2508 uint32_t flags);
2509void radeon_device_fini(struct radeon_device *rdev);
2510int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
2511
2512#define RADEON_MIN_MMIO_SIZE 0x10000
2513
2514uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg);
2515void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v);
2516static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
2517 bool always_indirect)
2518{
2519 /* The mmio size is 64kb at minimum. Allows the if to be optimized out. */
2520 if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
2521 return readl(((void __iomem *)rdev->rmmio) + reg);

[Analyzer path notes, condensed: at line 2520 the checker explored both outcomes of each comparison ('reg' is < or >= field 'rmmio_size', 'reg' is < RADEON_MIN_MMIO_SIZE) with 'always_indirect' false on every visited path, and for each visit to line 2521 it recorded: Returning without writing to 'rdev->me_fw'.]
2522 else
2523 return r100_mm_rreg_slow(rdev, reg);
2524}
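The repeated "Returning without writing to 'rdev->me_fw'" notes above are what tie this helper to the warning in the bug summary: none of the register reads along the analyzed path initialize the firmware pointer, so when microcode loading has failed, a later dereference of rdev->me_fw->size is reached with me_fw still NULL. A hedged sketch of the flagged pattern (simplified, with a hypothetical loader name; not the literal r100.c code):

 static int cp_microcode_size_dw(struct radeon_device *rdev)
 {
 	if (rdev->me_fw == NULL) {
 		int r = load_me_microcode(rdev); /* hypothetical loader; may fail */
 		if (r)
 			return r; /* without this early return, me_fw can stay NULL */
 	}
 	/* the analyzer warns when any path reaches here with me_fw == NULL */
 	return rdev->me_fw->size / 4;
 }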
2525static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
2526 bool_Bool always_indirect)
2527{
2528 if ((reg
136.1
'reg' is < field 'rmmio_size'
140.1
'reg' is < field 'rmmio_size'
144.1
'reg' is < field 'rmmio_size'
174.1
'reg' is < field 'rmmio_size'
178.1
'reg' is < field 'rmmio_size'
182.1
'reg' is < field 'rmmio_size'
191.1
'reg' is < field 'rmmio_size'
195.1
'reg' is < field 'rmmio_size'
204.1
'reg' is < field 'rmmio_size'
225.1
'reg' is < field 'rmmio_size'
229.1
'reg' is < field 'rmmio_size'
243.1
'reg' is < field 'rmmio_size'
< rdev->rmmio_size
|| reg
154.1
'reg' is < RADEON_MIN_MMIO_SIZE
264.1
'reg' is < RADEON_MIN_MMIO_SIZE
< RADEON_MIN_MMIO_SIZE0x10000) && !always_indirect
122.1
'always_indirect' is false
127.1
'always_indirect' is false
132.1
'always_indirect' is false
136.2
'always_indirect' is false
140.2
'always_indirect' is false
144.2
'always_indirect' is false
154.2
'always_indirect' is false
174.2
'always_indirect' is false
178.2
'always_indirect' is false
182.2
'always_indirect' is false
191.2
'always_indirect' is false
195.2
'always_indirect' is false
204.2
'always_indirect' is false
213.1
'always_indirect' is false
219.1
'always_indirect' is false
225.2
'always_indirect' is false
229.2
'always_indirect' is false
243.2
'always_indirect' is false
264.2
'always_indirect' is false
)
122
Assuming 'reg' is < field 'rmmio_size'
123
Taking true branch
127
Assuming 'reg' is < field 'rmmio_size'
128
Taking true branch
132
Assuming 'reg' is < field 'rmmio_size'
133
Taking true branch
137
Taking true branch
141
Taking true branch
145
Taking true branch
154
Assuming 'reg' is >= field 'rmmio_size'
155
Taking true branch
175
Taking true branch
179
Taking true branch
183
Taking true branch
192
Taking true branch
196
Taking true branch
205
Taking true branch
213
Assuming 'reg' is < field 'rmmio_size'
214
Taking true branch
219
Assuming 'reg' is < field 'rmmio_size'
220
Taking true branch
226
Taking true branch
230
Taking true branch
244
Taking true branch
264
Assuming 'reg' is >= field 'rmmio_size'
265
Taking true branch
2529 writel(v, ((void __iomem *)rdev->rmmio) + reg)iowrite32(v, ((void *)rdev->rmmio) + reg);
2530 else
2531 r100_mm_wreg_slow(rdev, reg, v);
2532}
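
Stripped of the interleaved analyzer events, the write helper above reduces to a simple fast-path/slow-path split. A minimal sketch of the same logic, with RADEON_MIN_MMIO_SIZE written as its expanded value 0x10000 (the read helper r100_mm_rreg is symmetric):

/* Registers inside the mapped MMIO window are written directly;
 * anything else is routed through the indirect slow path. */
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg,
                                uint32_t v, bool always_indirect)
{
        if ((reg < rdev->rmmio_size || reg < 0x10000 /* RADEON_MIN_MMIO_SIZE */) &&
            !always_indirect)
                iowrite32(v, ((void *)rdev->rmmio) + reg);
        else
                r100_mm_wreg_slow(rdev, reg, v);
}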
124
Returning without writing to 'rdev->me_fw'
129
Returning without writing to 'rdev->me_fw'
134
Returning without writing to 'rdev->me_fw'
138
Returning without writing to 'rdev->me_fw'
142
Returning without writing to 'rdev->me_fw'
146
Returning without writing to 'rdev->me_fw'
156
Returning without writing to 'rdev->me_fw'
176
Returning without writing to 'rdev->me_fw'
180
Returning without writing to 'rdev->me_fw'
184
Returning without writing to 'rdev->me_fw'
193
Returning without writing to 'rdev->me_fw'
197
Returning without writing to 'rdev->me_fw'
206
Returning without writing to 'rdev->me_fw'
215
Returning without writing to 'rdev->me_fw'
221
Returning without writing to 'rdev->me_fw'
227
Returning without writing to 'rdev->me_fw'
231
Returning without writing to 'rdev->me_fw'
245
Returning without writing to 'rdev->me_fw'
266
Returning without writing to 'rdev->me_fw'
2533
2534u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
2535void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2536
2537u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
2538void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
2539
2540/*
2541 * Cast helper
2542 */
2543extern const struct dma_fence_ops radeon_fence_ops;
2544
2545static inline struct radeon_fence *to_radeon_fence(struct dma_fence *f)
2546{
2547 struct radeon_fence *__f = container_of(f, struct radeon_fence, base)({ const __typeof( ((struct radeon_fence *)0)->base ) *__mptr
= (f); (struct radeon_fence *)( (char *)__mptr - __builtin_offsetof
(struct radeon_fence, base) );})
;
2548
2549 if (__f->base.ops == &radeon_fence_ops)
2550 return __f;
2551
2552 return NULL((void *)0);
2553}
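
A short usage sketch of this guarded downcast; the wrapper function is illustrative, not taken from the driver:

/* container_of() is only valid when the ops table matches, which is
 * exactly what to_radeon_fence() checks before returning non-NULL. */
static bool example_is_radeon_fence(struct dma_fence *f)
{
        return to_radeon_fence(f) != NULL;
}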
2554
2555/*
2556 * Registers read & write functions.
2557 */
2558#define RREG8(reg)ioread8((rdev->rmmio) + (reg)) readb((rdev->rmmio) + (reg))ioread8((rdev->rmmio) + (reg))
2559#define WREG8(reg, v)iowrite8(v, (rdev->rmmio) + (reg)) writeb(v, (rdev->rmmio) + (reg))iowrite8(v, (rdev->rmmio) + (reg))
2560#define RREG16(reg)ioread16((rdev->rmmio) + (reg)) readw((rdev->rmmio) + (reg))ioread16((rdev->rmmio) + (reg))
2561#define WREG16(reg, v)iowrite16(v, (rdev->rmmio) + (reg)) writew(v, (rdev->rmmio) + (reg))iowrite16(v, (rdev->rmmio) + (reg))
2562#define RREG32(reg)r100_mm_rreg(rdev, (reg), 0) r100_mm_rreg(rdev, (reg), false0)
2563#define RREG32_IDX(reg)r100_mm_rreg(rdev, (reg), 1) r100_mm_rreg(rdev, (reg), true1)
2564#define DREG32(reg)do { } while(0) pr_info("REGISTER: " #reg " : 0x%08X\n", \do { } while(0)
2565 r100_mm_rreg(rdev, (reg), false))do { } while(0)
2566#define WREG32(reg, v)r100_mm_wreg(rdev, (reg), (v), 0) r100_mm_wreg(rdev, (reg), (v), false0)
2567#define WREG32_IDX(reg, v)r100_mm_wreg(rdev, (reg), (v), 1) r100_mm_wreg(rdev, (reg), (v), true1)
2568#define REG_SET(FIELD, v)(((v) << FIELD_SHIFT) & FIELD_MASK) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
2569#define REG_GET(FIELD, v)(((v) << FIELD_SHIFT) & FIELD_MASK) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
2570#define RREG32_PLL(reg)rdev->pll_rreg(rdev, (reg)) rdev->pll_rreg(rdev, (reg))
2571#define WREG32_PLL(reg, v)rdev->pll_wreg(rdev, (reg), (v)) rdev->pll_wreg(rdev, (reg), (v))
2572#define RREG32_MC(reg)rdev->mc_rreg(rdev, (reg)) rdev->mc_rreg(rdev, (reg))
2573#define WREG32_MC(reg, v)rdev->mc_wreg(rdev, (reg), (v)) rdev->mc_wreg(rdev, (reg), (v))
2574#define RREG32_PCIE(reg)rv370_pcie_rreg(rdev, (reg)) rv370_pcie_rreg(rdev, (reg))
2575#define WREG32_PCIE(reg, v)rv370_pcie_wreg(rdev, (reg), (v)) rv370_pcie_wreg(rdev, (reg), (v))
2576#define RREG32_PCIE_PORT(reg)rdev->pciep_rreg(rdev, (reg)) rdev->pciep_rreg(rdev, (reg))
2577#define WREG32_PCIE_PORT(reg, v)rdev->pciep_wreg(rdev, (reg), (v)) rdev->pciep_wreg(rdev, (reg), (v))
2578#define RREG32_SMC(reg)tn_smc_rreg(rdev, (reg)) tn_smc_rreg(rdev, (reg))
2579#define WREG32_SMC(reg, v)tn_smc_wreg(rdev, (reg), (v)) tn_smc_wreg(rdev, (reg), (v))
2580#define RREG32_RCU(reg)r600_rcu_rreg(rdev, (reg)) r600_rcu_rreg(rdev, (reg))
2581#define WREG32_RCU(reg, v)r600_rcu_wreg(rdev, (reg), (v)) r600_rcu_wreg(rdev, (reg), (v))
2582#define RREG32_CG(reg)eg_cg_rreg(rdev, (reg)) eg_cg_rreg(rdev, (reg))
2583#define WREG32_CG(reg, v)eg_cg_wreg(rdev, (reg), (v)) eg_cg_wreg(rdev, (reg), (v))
2584#define RREG32_PIF_PHY0(reg)eg_pif_phy0_rreg(rdev, (reg)) eg_pif_phy0_rreg(rdev, (reg))
2585#define WREG32_PIF_PHY0(reg, v)eg_pif_phy0_wreg(rdev, (reg), (v)) eg_pif_phy0_wreg(rdev, (reg), (v))
2586#define RREG32_PIF_PHY1(reg)eg_pif_phy1_rreg(rdev, (reg)) eg_pif_phy1_rreg(rdev, (reg))
2587#define WREG32_PIF_PHY1(reg, v)eg_pif_phy1_wreg(rdev, (reg), (v)) eg_pif_phy1_wreg(rdev, (reg), (v))
2588#define RREG32_UVD_CTX(reg)r600_uvd_ctx_rreg(rdev, (reg)) r600_uvd_ctx_rreg(rdev, (reg))
2589#define WREG32_UVD_CTX(reg, v)r600_uvd_ctx_wreg(rdev, (reg), (v)) r600_uvd_ctx_wreg(rdev, (reg), (v))
2590#define RREG32_DIDT(reg)cik_didt_rreg(rdev, (reg)) cik_didt_rreg(rdev, (reg))
2591#define WREG32_DIDT(reg, v)cik_didt_wreg(rdev, (reg), (v)) cik_didt_wreg(rdev, (reg), (v))
2592#define WREG32_P(reg, val, mask)do { uint32_t tmp_ = r100_mm_rreg(rdev, (reg), 0); tmp_ &=
(mask); tmp_ |= ((val) & ~(mask)); r100_mm_wreg(rdev, (reg
), (tmp_), 0); } while (0)
\
2593 do { \
2594 uint32_t tmp_ = RREG32(reg)r100_mm_rreg(rdev, (reg), 0); \
2595 tmp_ &= (mask); \
2596 tmp_ |= ((val) & ~(mask)); \
2597 WREG32(reg, tmp_)r100_mm_wreg(rdev, (reg), (tmp_), 0); \
2598 } while (0)
2599#define WREG32_AND(reg, and)do { uint32_t tmp_ = r100_mm_rreg(rdev, (reg), 0); tmp_ &=
(and); tmp_ |= ((0) & ~(and)); r100_mm_wreg(rdev, (reg),
(tmp_), 0); } while (0)
WREG32_P(reg, 0, and)do { uint32_t tmp_ = r100_mm_rreg(rdev, (reg), 0); tmp_ &=
(and); tmp_ |= ((0) & ~(and)); r100_mm_wreg(rdev, (reg),
(tmp_), 0); } while (0)
2600#define WREG32_OR(reg, or)do { uint32_t tmp_ = r100_mm_rreg(rdev, (reg), 0); tmp_ &=
(~(or)); tmp_ |= ((or) & ~(~(or))); r100_mm_wreg(rdev, (
reg), (tmp_), 0); } while (0)
WREG32_P(reg, or, ~(or))do { uint32_t tmp_ = r100_mm_rreg(rdev, (reg), 0); tmp_ &=
(~(or)); tmp_ |= ((or) & ~(~(or))); r100_mm_wreg(rdev, (
reg), (tmp_), 0); } while (0)
2601#define WREG32_PLL_P(reg, val, mask)do { uint32_t tmp_ = rdev->pll_rreg(rdev, (reg)); tmp_ &=
(mask); tmp_ |= ((val) & ~(mask)); rdev->pll_wreg(rdev
, (reg), (tmp_)); } while (0)
\
2602 do { \
2603 uint32_t tmp_ = RREG32_PLL(reg)rdev->pll_rreg(rdev, (reg)); \
2604 tmp_ &= (mask); \
2605 tmp_ |= ((val) & ~(mask)); \
2606 WREG32_PLL(reg, tmp_)rdev->pll_wreg(rdev, (reg), (tmp_)); \
2607 } while (0)
2608#define WREG32_SMC_P(reg, val, mask)do { uint32_t tmp_ = tn_smc_rreg(rdev, (reg)); tmp_ &= (mask
); tmp_ |= ((val) & ~(mask)); tn_smc_wreg(rdev, (reg), (tmp_
)); } while (0)
\
2609 do { \
2610 uint32_t tmp_ = RREG32_SMC(reg)tn_smc_rreg(rdev, (reg)); \
2611 tmp_ &= (mask); \
2612 tmp_ |= ((val) & ~(mask)); \
2613 WREG32_SMC(reg, tmp_)tn_smc_wreg(rdev, (reg), (tmp_)); \
2614 } while (0)
2615#define DREG32_SYS(sqf, rdev, reg)seq_printf((sqf), "reg" " : 0x%08X\n", r100_mm_rreg((rdev), (
reg), 0))
seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false0))
2616#define RREG32_IO(reg)r100_io_rreg(rdev, (reg)) r100_io_rreg(rdev, (reg))
2617#define WREG32_IO(reg, v)r100_io_wreg(rdev, (reg), (v)) r100_io_wreg(rdev, (reg), (v))
2618
2619#define RDOORBELL32(index)cik_mm_rdoorbell(rdev, (index)) cik_mm_rdoorbell(rdev, (index))
2620#define WDOORBELL32(index, v)cik_mm_wdoorbell(rdev, (index), (v)) cik_mm_wdoorbell(rdev, (index), (v))
2621
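The WREG32_P family defined above performs a read-modify-write: bits set in mask are preserved from the current register value, while bits outside mask are taken from val; WREG32_AND and WREG32_OR are the two common special cases. A hedged example with a hypothetical register name (note that, like the other macros here, these pick up a local rdev from the enclosing function):

/* Hypothetical register HYPOTHETICAL_CNTL: keep bits 8..31 and zero
 * the low byte, then set bit 0, then clear bit 1. */
WREG32_P(HYPOTHETICAL_CNTL, 0x00, ~0xffu);      /* low byte <- 0 */
WREG32_OR(HYPOTHETICAL_CNTL, 0x1);              /* bit 0 <- 1 */
WREG32_AND(HYPOTHETICAL_CNTL, ~0x2u);           /* bit 1 <- 0 */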
2622/*
2623 * Indirect registers accessors.
2624 * They used to be inlined, but this increases code size by ~65 kbytes.
2625 * Since each performs a pair of MMIO ops
2626 * within a spin_lock_irqsave/spin_unlock_irqrestore region,
2627 * the cost of call+ret is almost negligible. MMIO and locking
2628 * costs several dozens of cycles each at best, call+ret is ~5 cycles.
2629 */
2630uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
2631void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
2632u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg);
2633void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2634u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg);
2635void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2636u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg);
2637void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2638u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg);
2639void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2640u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg);
2641void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2642u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg);
2643void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v);
2644u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg);
2645void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v);
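
As the comment above explains, each of these accessors pairs an index write with a data read or write inside a spinlock-protected region. A minimal sketch of that index/data pattern; PCIE_INDEX, PCIE_DATA and the lock name are illustrative stand-ins, not the driver's actual definitions:

/* Select the indirect register via the index port, then transfer the
 * value through the data port, all under the protecting spinlock. */
uint32_t example_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
        unsigned long flags;
        uint32_t r;

        spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
        WREG32(PCIE_INDEX, reg);        /* select the indirect register */
        r = RREG32(PCIE_DATA);          /* read its value back */
        spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
        return r;
}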
2646
2647void r100_pll_errata_after_index(struct radeon_device *rdev);
2648
2649
2650/*
2651 * ASICs helpers.
2652 */
2653#define ASIC_IS_RN50(rdev)((rdev->pdev->device == 0x515e) || (rdev->pdev->device
== 0x5969))
((rdev->pdev->device == 0x515e) || \
2654 (rdev->pdev->device == 0x5969))
2655#define ASIC_IS_RV100(rdev)((rdev->family == CHIP_RV100) || (rdev->family == CHIP_RV200
) || (rdev->family == CHIP_RS100) || (rdev->family == CHIP_RS200
) || (rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280
) || (rdev->family == CHIP_RS300))
((rdev->family == CHIP_RV100) || \
2656 (rdev->family == CHIP_RV200) || \
2657 (rdev->family == CHIP_RS100) || \
2658 (rdev->family == CHIP_RS200) || \
2659 (rdev->family == CHIP_RV250) || \
2660 (rdev->family == CHIP_RV280) || \
2661 (rdev->family == CHIP_RS300))
2662#define ASIC_IS_R300(rdev)((rdev->family == CHIP_R300) || (rdev->family == CHIP_RV350
) || (rdev->family == CHIP_R350) || (rdev->family == CHIP_RV380
) || (rdev->family == CHIP_R420) || (rdev->family == CHIP_R423
) || (rdev->family == CHIP_RV410) || (rdev->family == CHIP_RS400
) || (rdev->family == CHIP_RS480))
((rdev->family == CHIP_R300) || \
2663 (rdev->family == CHIP_RV350) || \
2664 (rdev->family == CHIP_R350) || \
2665 (rdev->family == CHIP_RV380) || \
2666 (rdev->family == CHIP_R420) || \
2667 (rdev->family == CHIP_R423) || \
2668 (rdev->family == CHIP_RV410) || \
2669 (rdev->family == CHIP_RS400) || \
2670 (rdev->family == CHIP_RS480))
2671#define ASIC_IS_X2(rdev)((rdev->ddev->pdev->device == 0x9441) || (rdev->ddev
->pdev->device == 0x9443) || (rdev->ddev->pdev->
device == 0x944B) || (rdev->ddev->pdev->device == 0x9506
) || (rdev->ddev->pdev->device == 0x9509) || (rdev->
ddev->pdev->device == 0x950F) || (rdev->ddev->pdev
->device == 0x689C) || (rdev->ddev->pdev->device ==
0x689D))
((rdev->ddev->pdev->device == 0x9441) || \
2672 (rdev->ddev->pdev->device == 0x9443) || \
2673 (rdev->ddev->pdev->device == 0x944B) || \
2674 (rdev->ddev->pdev->device == 0x9506) || \
2675 (rdev->ddev->pdev->device == 0x9509) || \
2676 (rdev->ddev->pdev->device == 0x950F) || \
2677 (rdev->ddev->pdev->device == 0x689C) || \
2678 (rdev->ddev->pdev->device == 0x689D))
2679#define ASIC_IS_AVIVO(rdev)((rdev->family >= CHIP_RS600)) ((rdev->family >= CHIP_RS600))
2680#define ASIC_IS_DCE2(rdev)((rdev->family == CHIP_RS600) || (rdev->family == CHIP_RS690
) || (rdev->family == CHIP_RS740) || (rdev->family >=
CHIP_R600))
((rdev->family == CHIP_RS600) || \
2681 (rdev->family == CHIP_RS690) || \
2682 (rdev->family == CHIP_RS740) || \
2683 (rdev->family >= CHIP_R600))
2684#define ASIC_IS_DCE3(rdev)((rdev->family >= CHIP_RV620)) ((rdev->family >= CHIP_RV620))
2685#define ASIC_IS_DCE32(rdev)((rdev->family >= CHIP_RV730)) ((rdev->family >= CHIP_RV730))
2686#define ASIC_IS_DCE4(rdev)((rdev->family >= CHIP_CEDAR)) ((rdev->family >= CHIP_CEDAR))
2687#define ASIC_IS_DCE41(rdev)((rdev->family >= CHIP_PALM) && (rdev->flags
& RADEON_IS_IGP))
((rdev->family >= CHIP_PALM) && \
2688 (rdev->flags & RADEON_IS_IGP))
2689#define ASIC_IS_DCE5(rdev)((rdev->family >= CHIP_BARTS)) ((rdev->family >= CHIP_BARTS))
2690#define ASIC_IS_DCE6(rdev)((rdev->family >= CHIP_ARUBA)) ((rdev->family >= CHIP_ARUBA))
2691#define ASIC_IS_DCE61(rdev)((rdev->family >= CHIP_ARUBA) && (rdev->flags
& RADEON_IS_IGP))
((rdev->family >= CHIP_ARUBA) && \
2692 (rdev->flags & RADEON_IS_IGP))
2693#define ASIC_IS_DCE64(rdev)((rdev->family == CHIP_OLAND)) ((rdev->family == CHIP_OLAND))
2694#define ASIC_IS_NODCE(rdev)((rdev->family == CHIP_HAINAN)) ((rdev->family == CHIP_HAINAN))
2695#define ASIC_IS_DCE8(rdev)((rdev->family >= CHIP_BONAIRE)) ((rdev->family >= CHIP_BONAIRE))
2696#define ASIC_IS_DCE81(rdev)((rdev->family == CHIP_KAVERI)) ((rdev->family == CHIP_KAVERI))
2697#define ASIC_IS_DCE82(rdev)((rdev->family == CHIP_BONAIRE)) ((rdev->family == CHIP_BONAIRE))
2698#define ASIC_IS_DCE83(rdev)((rdev->family == CHIP_KABINI) || (rdev->family == CHIP_MULLINS
))
((rdev->family == CHIP_KABINI) || \
2699 (rdev->family == CHIP_MULLINS))
2700
2701#define ASIC_IS_LOMBOK(rdev)((rdev->ddev->pdev->device == 0x6849) || (rdev->ddev
->pdev->device == 0x6850) || (rdev->ddev->pdev->
device == 0x6858) || (rdev->ddev->pdev->device == 0x6859
) || (rdev->ddev->pdev->device == 0x6840) || (rdev->
ddev->pdev->device == 0x6841) || (rdev->ddev->pdev
->device == 0x6842) || (rdev->ddev->pdev->device ==
0x6843))
((rdev->ddev->pdev->device == 0x6849) || \
2702 (rdev->ddev->pdev->device == 0x6850) || \
2703 (rdev->ddev->pdev->device == 0x6858) || \
2704 (rdev->ddev->pdev->device == 0x6859) || \
2705 (rdev->ddev->pdev->device == 0x6840) || \
2706 (rdev->ddev->pdev->device == 0x6841) || \
2707 (rdev->ddev->pdev->device == 0x6842) || \
2708 (rdev->ddev->pdev->device == 0x6843))
2709
2710/*
2711 * BIOS helpers.
2712 */
2713#define RBIOS8(i)(rdev->bios[i]) (rdev->bios[i])
2714#define RBIOS16(i)((rdev->bios[i]) | ((rdev->bios[(i)+1]) << 8)) (RBIOS8(i)(rdev->bios[i]) | (RBIOS8((i)+1)(rdev->bios[(i)+1]) << 8))
2715#define RBIOS32(i)((((rdev->bios[i]) | ((rdev->bios[(i)+1]) << 8)))
| (((rdev->bios[(i)+2]) | ((rdev->bios[((i)+2)+1]) <<
8)) << 16))
((RBIOS16(i)((rdev->bios[i]) | ((rdev->bios[(i)+1]) << 8))) | (RBIOS16((i)+2)((rdev->bios[(i)+2]) | ((rdev->bios[((i)+2)+1]) <<
8))
<< 16))
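
These helpers compose little-endian 16- and 32-bit values from individual BIOS image bytes (rdev is picked up from the enclosing scope, as with the other macros). A worked example with hypothetical byte contents:

/* If rdev->bios[0x48..0x4b] holds { 0x78, 0x56, 0x34, 0x12 }: */
uint16_t lo = RBIOS16(0x48);    /* 0x5678 */
uint32_t v  = RBIOS32(0x48);    /* 0x12345678 */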
2716
2717int radeon_combios_init(struct radeon_device *rdev);
2718void radeon_combios_fini(struct radeon_device *rdev);
2719int radeon_atombios_init(struct radeon_device *rdev);
2720void radeon_atombios_fini(struct radeon_device *rdev);
2721
2722
2723/*
2724 * RING helpers.
2725 */
2726
2727/**
2728 * radeon_ring_write - write a value to the ring
2729 *
2730 * @ring: radeon_ring structure holding ring information
2731 * @v: dword (dw) value to write
2732 *
2733 * Write a value to the requested ring buffer (all asics).
2734 */
2735static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
2736{
2737 if (ring->count_dw <= 0)
2738 DRM_ERROR("radeon: writing more dwords to the ring than expected!\n")__drm_err("radeon: writing more dwords to the ring than expected!\n"
)
;
2739
2740 ring->ring[ring->wptr++] = v;
2741 ring->wptr &= ring->ptr_mask;
2742 ring->count_dw--;
2743 ring->ring_free_dw--;
2744}
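
A hedged sketch of the usual emit pattern around radeon_ring_write(): space is reserved first so count_dw cannot underflow, then the dwords are written and the write pointer is committed. The packet header value is illustrative, and radeon_ring_lock()/radeon_ring_unlock_commit() are used here on the assumption that they are the driver's reserve/commit helpers:

static int example_emit_two_dwords(struct radeon_device *rdev,
                                   struct radeon_ring *ring)
{
        int r;

        /* Reserve two dwords, emit them, then commit the write pointer. */
        r = radeon_ring_lock(rdev, ring, 2);
        if (r)
                return r;
        radeon_ring_write(ring, 0xc0001000);    /* hypothetical packet header */
        radeon_ring_write(ring, 0);             /* payload dword */
        radeon_ring_unlock_commit(rdev, ring, false);
        return 0;
}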
2745
2746/*
2747 * ASICs macro.
2748 */
2749#define radeon_init(rdev)(rdev)->asic->init((rdev)) (rdev)->asic->init((rdev))
2750#define radeon_fini(rdev)(rdev)->asic->fini((rdev)) (rdev)->asic->fini((rdev))
2751#define radeon_resume(rdev)(rdev)->asic->resume((rdev)) (rdev)->asic->resume((rdev))
2752#define radeon_suspend(rdev)(rdev)->asic->suspend((rdev)) (rdev)->asic->suspend((rdev))
2753#define radeon_cs_parse(rdev, r, p)(rdev)->asic->ring[(r)]->cs_parse((p)) (rdev)->asic->ring[(r)]->cs_parse((p))
2754#define radeon_vga_set_state(rdev, state)(rdev)->asic->vga_set_state((rdev), (state)) (rdev)->asic->vga_set_state((rdev), (state))
2755#define radeon_asic_reset(rdev)(rdev)->asic->asic_reset((rdev), 0) (rdev)->asic->asic_reset((rdev), false0)
2756#define radeon_gart_tlb_flush(rdev)(rdev)->asic->gart.tlb_flush((rdev)) (rdev)->asic->gart.tlb_flush((rdev))
2757#define radeon_gart_get_page_entry(a, f)(rdev)->asic->gart.get_page_entry((a), (f)) (rdev)->asic->gart.get_page_entry((a), (f))
2758#define radeon_gart_set_page(rdev, i, e)(rdev)->asic->gart.set_page((rdev), (i), (e)) (rdev)->asic->gart.set_page((rdev), (i), (e))
2759#define radeon_asic_vm_init(rdev)(rdev)->asic->vm.init((rdev)) (rdev)->asic->vm.init((rdev))
2760#define radeon_asic_vm_fini(rdev)(rdev)->asic->vm.fini((rdev)) (rdev)->asic->vm.fini((rdev))
2761#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count)((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src),
(count)))
((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
2762#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags)((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr
), (count), (incr), (flags)))
((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2763#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags)((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr),
(count), (incr), (flags)))
((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
2764#define radeon_asic_vm_pad_ib(rdev, ib)((rdev)->asic->vm.pad_ib((ib))) ((rdev)->asic->vm.pad_ib((ib)))
2765#define radeon_ring_start(rdev, r, cp)(rdev)->asic->ring[(r)]->ring_start((rdev), (cp)) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
2766#define radeon_ring_test(rdev, r, cp)(rdev)->asic->ring[(r)]->ring_test((rdev), (cp)) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
2767#define radeon_ib_test(rdev, r, cp)(rdev)->asic->ring[(r)]->ib_test((rdev), (cp)) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
2768#define radeon_ring_ib_execute(rdev, r, ib)(rdev)->asic->ring[(r)]->ib_execute((rdev), (ib)) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
2769#define radeon_ring_ib_parse(rdev, r, ib)(rdev)->asic->ring[(r)]->ib_parse((rdev), (ib)) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
2770#define radeon_ring_is_lockup(rdev, r, cp)(rdev)->asic->ring[(r)]->is_lockup((rdev), (cp)) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
2771#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr)(rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r
), (vm_id), (pd_addr))
(rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
2772#define radeon_ring_get_rptr(rdev, r)(rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r
))
(rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
2773#define radeon_ring_get_wptr(rdev, r)(rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r
))
(rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
2774#define radeon_ring_set_wptr(rdev, r)(rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r
))
(rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
2775#define radeon_irq_set(rdev)(rdev)->asic->irq.set((rdev)) (rdev)->asic->irq.set((rdev))
2776#define radeon_irq_process(rdev)(rdev)->asic->irq.process((rdev)) (rdev)->asic->irq.process((rdev))
2777#define radeon_get_vblank_counter(rdev, crtc)(rdev)->asic->display.get_vblank_counter((rdev), (crtc)
)
(rdev)->asic->display.get_vblank_counter((rdev), (crtc))
2778#define radeon_set_backlight_level(rdev, e, l)(rdev)->asic->display.set_backlight_level((e), (l)) (rdev)->asic->display.set_backlight_level((e), (l))
2779#define radeon_get_backlight_level(rdev, e)(rdev)->asic->display.get_backlight_level((e)) (rdev)->asic->display.get_backlight_level((e))
2780#define radeon_hdmi_enable(rdev, e, b)(rdev)->asic->display.hdmi_enable((e), (b)) (rdev)->asic->display.hdmi_enable((e), (b))
2781#define radeon_hdmi_setmode(rdev, e, m)(rdev)->asic->display.hdmi_setmode((e), (m)) (rdev)->asic->display.hdmi_setmode((e), (m))
2782#define radeon_fence_ring_emit(rdev, r, fence)(rdev)->asic->ring[(r)]->emit_fence((rdev), (fence)) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
2783#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait)(rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp)
, (semaphore), (emit_wait))
(rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
2784#define radeon_copy_blit(rdev, s, d, np, resv)(rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv)) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
2785#define radeon_copy_dma(rdev, s, d, np, resv)(rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv)) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
2786#define radeon_copy(rdev, s, d, np, resv)(rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv)) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
2787#define radeon_copy_blit_ring_index(rdev)(rdev)->asic->copy.blit_ring_index (rdev)->asic->copy.blit_ring_index
2788#define radeon_copy_dma_ring_index(rdev)(rdev)->asic->copy.dma_ring_index (rdev)->asic->copy.dma_ring_index
2789#define radeon_copy_ring_index(rdev)(rdev)->asic->copy.copy_ring_index (rdev)->asic->copy.copy_ring_index
2790#define radeon_get_engine_clock(rdev)(rdev)->asic->pm.get_engine_clock((rdev)) (rdev)->asic->pm.get_engine_clock((rdev))
2791#define radeon_set_engine_clock(rdev, e)(rdev)->asic->pm.set_engine_clock((rdev), (e)) (rdev)->asic->pm.set_engine_clock((rdev), (e))
2792#define radeon_get_memory_clock(rdev)(rdev)->asic->pm.get_memory_clock((rdev)) (rdev)->asic->pm.get_memory_clock((rdev))
2793#define radeon_set_memory_clock(rdev, e)(rdev)->asic->pm.set_memory_clock((rdev), (e)) (rdev)->asic->pm.set_memory_clock((rdev), (e))
2794#define radeon_get_pcie_lanes(rdev)(rdev)->asic->pm.get_pcie_lanes((rdev)) (rdev)->asic->pm.get_pcie_lanes((rdev))
2795#define radeon_set_pcie_lanes(rdev, l)(rdev)->asic->pm.set_pcie_lanes((rdev), (l)) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
2796#define radeon_set_clock_gating(rdev, e)(rdev)->asic->pm.set_clock_gating((rdev), (e)) (rdev)->asic->pm.set_clock_gating((rdev), (e))
2797#define radeon_set_uvd_clocks(rdev, v, d)(rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d)) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
2798#define radeon_set_vce_clocks(rdev, ev, ec)(rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec)) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec))
2799#define radeon_get_temperature(rdev)(rdev)->asic->pm.get_temperature((rdev)) (rdev)->asic->pm.get_temperature((rdev))
2800#define radeon_set_surface_reg(rdev, r, f, p, o, s)((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (
o), (s)))
((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
2801#define radeon_clear_surface_reg(rdev, r)((rdev)->asic->surface.clear_reg((rdev), (r))) ((rdev)->asic->surface.clear_reg((rdev), (r)))
2802#define radeon_bandwidth_update(rdev)(rdev)->asic->display.bandwidth_update((rdev)) (rdev)->asic->display.bandwidth_update((rdev))
2803#define radeon_hpd_init(rdev)(rdev)->asic->hpd.init((rdev)) (rdev)->asic->hpd.init((rdev))
2804#define radeon_hpd_fini(rdev)(rdev)->asic->hpd.fini((rdev)) (rdev)->asic->hpd.fini((rdev))
2805#define radeon_hpd_sense(rdev, h)(rdev)->asic->hpd.sense((rdev), (h)) (rdev)->asic->hpd.sense((rdev), (h))
2806#define radeon_hpd_set_polarity(rdev, h)(rdev)->asic->hpd.set_polarity((rdev), (h)) (rdev)->asic->hpd.set_polarity((rdev), (h))
2807#define radeon_gui_idle(rdev)(rdev)->asic->gui_idle((rdev)) (rdev)->asic->gui_idle((rdev))
2808#define radeon_pm_misc(rdev)(rdev)->asic->pm.misc((rdev)) (rdev)->asic->pm.misc((rdev))
2809#define radeon_pm_prepare(rdev)(rdev)->asic->pm.prepare((rdev)) (rdev)->asic->pm.prepare((rdev))
2810#define radeon_pm_finish(rdev)(rdev)->asic->pm.finish((rdev)) (rdev)->asic->pm.finish((rdev))
2811#define radeon_pm_init_profile(rdev)(rdev)->asic->pm.init_profile((rdev)) (rdev)->asic->pm.init_profile((rdev))
2812#define radeon_pm_get_dynpm_state(rdev)(rdev)->asic->pm.get_dynpm_state((rdev)) (rdev)->asic->pm.get_dynpm_state((rdev))
2813#define radeon_page_flip(rdev, crtc, base, async)(rdev)->asic->pflip.page_flip((rdev), (crtc), (base), (
async))
(rdev)->asic->pflip.page_flip((rdev), (crtc), (base), (async))
2814#define radeon_page_flip_pending(rdev, crtc)(rdev)->asic->pflip.page_flip_pending((rdev), (crtc)) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
2815#define radeon_wait_for_vblank(rdev, crtc)(rdev)->asic->display.wait_for_vblank((rdev), (crtc)) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
2816#define radeon_mc_wait_for_idle(rdev)(rdev)->asic->mc_wait_for_idle((rdev)) (rdev)->asic->mc_wait_for_idle((rdev))
2817#define radeon_get_xclk(rdev)(rdev)->asic->get_xclk((rdev)) (rdev)->asic->get_xclk((rdev))
2818#define radeon_get_gpu_clock_counter(rdev)(rdev)->asic->get_gpu_clock_counter((rdev)) (rdev)->asic->get_gpu_clock_counter((rdev))
2819#define radeon_get_allowed_info_register(rdev, r, v)(rdev)->asic->get_allowed_info_register((rdev), (r), (v
))
(rdev)->asic->get_allowed_info_register((rdev), (r), (v))
2820#define radeon_dpm_init(rdev)rdev->asic->dpm.init((rdev)) rdev->asic->dpm.init((rdev))
2821#define radeon_dpm_setup_asic(rdev)rdev->asic->dpm.setup_asic((rdev)) rdev->asic->dpm.setup_asic((rdev))
2822#define radeon_dpm_enable(rdev)rdev->asic->dpm.enable((rdev)) rdev->asic->dpm.enable((rdev))
2823#define radeon_dpm_late_enable(rdev)rdev->asic->dpm.late_enable((rdev)) rdev->asic->dpm.late_enable((rdev))
2824#define radeon_dpm_disable(rdev)rdev->asic->dpm.disable((rdev)) rdev->asic->dpm.disable((rdev))
2825#define radeon_dpm_pre_set_power_state(rdev)rdev->asic->dpm.pre_set_power_state((rdev)) rdev->asic->dpm.pre_set_power_state((rdev))
2826#define radeon_dpm_set_power_state(rdev)rdev->asic->dpm.set_power_state((rdev)) rdev->asic->dpm.set_power_state((rdev))
2827#define radeon_dpm_post_set_power_state(rdev)rdev->asic->dpm.post_set_power_state((rdev)) rdev->asic->dpm.post_set_power_state((rdev))
2828#define radeon_dpm_display_configuration_changed(rdev)rdev->asic->dpm.display_configuration_changed((rdev)) rdev->asic->dpm.display_configuration_changed((rdev))
2829#define radeon_dpm_fini(rdev)rdev->asic->dpm.fini((rdev)) rdev->asic->dpm.fini((rdev))
2830#define radeon_dpm_get_sclk(rdev, l)rdev->asic->dpm.get_sclk((rdev), (l)) rdev->asic->dpm.get_sclk((rdev), (l))
2831#define radeon_dpm_get_mclk(rdev, l)rdev->asic->dpm.get_mclk((rdev), (l)) rdev->asic->dpm.get_mclk((rdev), (l))
2832#define radeon_dpm_print_power_state(rdev, ps)rdev->asic->dpm.print_power_state((rdev), (ps)) rdev->asic->dpm.print_power_state((rdev), (ps))
2833#define radeon_dpm_debugfs_print_current_performance_level(rdev, m)rdev->asic->dpm.debugfs_print_current_performance_level
((rdev), (m))
rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
2834#define radeon_dpm_force_performance_level(rdev, l)rdev->asic->dpm.force_performance_level((rdev), (l)) rdev->asic->dpm.force_performance_level((rdev), (l))
2835#define radeon_dpm_vblank_too_short(rdev)rdev->asic->dpm.vblank_too_short((rdev)) rdev->asic->dpm.vblank_too_short((rdev))
2836#define radeon_dpm_powergate_uvd(rdev, g)rdev->asic->dpm.powergate_uvd((rdev), (g)) rdev->asic->dpm.powergate_uvd((rdev), (g))
2837#define radeon_dpm_enable_bapm(rdev, e)rdev->asic->dpm.enable_bapm((rdev), (e)) rdev->asic->dpm.enable_bapm((rdev), (e))
2838#define radeon_dpm_get_current_sclk(rdev)rdev->asic->dpm.get_current_sclk((rdev)) rdev->asic->dpm.get_current_sclk((rdev))
2839#define radeon_dpm_get_current_mclk(rdev)rdev->asic->dpm.get_current_mclk((rdev)) rdev->asic->dpm.get_current_mclk((rdev))
2840
2841/* Common functions */
2842/* AGP */
2843extern int radeon_gpu_reset(struct radeon_device *rdev);
2844extern void radeon_pci_config_reset(struct radeon_device *rdev);
2845extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool_Bool hung);
2846extern void radeon_agp_disable(struct radeon_device *rdev);
2847extern int radeon_modeset_init(struct radeon_device *rdev);
2848extern void radeon_modeset_fini(struct radeon_device *rdev);
2849extern bool_Bool radeon_card_posted(struct radeon_device *rdev);
2850extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
2851extern void radeon_update_display_priority(struct radeon_device *rdev);
2852extern bool_Bool radeon_boot_test_post_card(struct radeon_device *rdev);
2853extern void radeon_scratch_init(struct radeon_device *rdev);
2854extern void radeon_wb_fini(struct radeon_device *rdev);
2855extern int radeon_wb_init(struct radeon_device *rdev);
2856extern void radeon_wb_disable(struct radeon_device *rdev);
2857extern void radeon_surface_init(struct radeon_device *rdev);
2858extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
2859extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
2860extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
2861extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
2862extern bool_Bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
2863extern int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
2864 struct ttm_tt *ttm, uint64_t addr,
2865 uint32_t flags);
2866extern bool_Bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm);
2867extern bool_Bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm);
2868bool_Bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
2869extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
2870extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
2871extern int radeon_resume_kms(struct drm_device *dev, bool_Bool resume, bool_Bool fbcon);
2872extern int radeon_suspend_kms(struct drm_device *dev, bool_Bool suspend,
2873 bool_Bool fbcon, bool_Bool freeze);
2874extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
2875extern void radeon_program_register_sequence(struct radeon_device *rdev,
2876 const u32 *registers,
2877 const u32 array_size);
2878struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev);
2879
2880/*
2881 * vm
2882 */
2883int radeon_vm_manager_init(struct radeon_device *rdev);
2884void radeon_vm_manager_fini(struct radeon_device *rdev);
2885int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
2886void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
2887struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
2888 struct radeon_vm *vm,
2889 struct list_head *head);
2890struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
2891 struct radeon_vm *vm, int ring);
2892void radeon_vm_flush(struct radeon_device *rdev,
2893 struct radeon_vm *vm,
2894 int ring, struct radeon_fence *fence);
2895void radeon_vm_fence(struct radeon_device *rdev,
2896 struct radeon_vm *vm,
2897 struct radeon_fence *fence);
2898uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
2899int radeon_vm_update_page_directory(struct radeon_device *rdev,
2900 struct radeon_vm *vm);
2901int radeon_vm_clear_freed(struct radeon_device *rdev,
2902 struct radeon_vm *vm);
2903int radeon_vm_clear_invalids(struct radeon_device *rdev,
2904 struct radeon_vm *vm);
2905int radeon_vm_bo_update(struct radeon_device *rdev,
2906 struct radeon_bo_va *bo_va,
2907 struct ttm_resource *mem);
2908void radeon_vm_bo_invalidate(struct radeon_device *rdev,
2909 struct radeon_bo *bo);
2910struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
2911 struct radeon_bo *bo);
2912struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
2913 struct radeon_vm *vm,
2914 struct radeon_bo *bo);
2915int radeon_vm_bo_set_addr(struct radeon_device *rdev,
2916 struct radeon_bo_va *bo_va,
2917 uint64_t offset,
2918 uint32_t flags);
2919void radeon_vm_bo_rmv(struct radeon_device *rdev,
2920 struct radeon_bo_va *bo_va);
2921
2922/* audio */
2923void r600_audio_update_hdmi(struct work_struct *work);
2924struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
2925struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
2926void r600_audio_enable(struct radeon_device *rdev,
2927 struct r600_audio_pin *pin,
2928 u8 enable_mask);
2929void dce6_audio_enable(struct radeon_device *rdev,
2930 struct r600_audio_pin *pin,
2931 u8 enable_mask);
2932
2933/*
2934 * R600 vram scratch functions
2935 */
2936int r600_vram_scratch_init(struct radeon_device *rdev);
2937void r600_vram_scratch_fini(struct radeon_device *rdev);
2938
2939/*
2940 * r600 cs checking helper
2941 */
2942unsigned r600_mip_minify(unsigned size, unsigned level);
2943bool_Bool r600_fmt_is_valid_color(u32 format);
2944bool_Bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
2945int r600_fmt_get_blocksize(u32 format);
2946int r600_fmt_get_nblocksx(u32 format, u32 w);
2947int r600_fmt_get_nblocksy(u32 format, u32 h);
2948
2949/*
2950 * r600 functions used by radeon_encoder.c
2951 */
2952struct radeon_hdmi_acr {
2953 u32 clock;
2954
2955 int n_32khz;
2956 int cts_32khz;
2957
2958 int n_44_1khz;
2959 int cts_44_1khz;
2960
2961 int n_48khz;
2962 int cts_48khz;
2963
2964};
2965
2966extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
2967
2968extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
2969 u32 tiling_pipe_num,
2970 u32 max_rb_num,
2971 u32 total_max_rb_num,
2972 u32 enabled_rb_mask);
2973
2974/*
2975 * evergreen functions used by radeon_encoder.c
2976 */
2977
2978extern int ni_init_microcode(struct radeon_device *rdev);
2979extern int ni_mc_load_microcode(struct radeon_device *rdev);
2980
2981/* radeon_acpi.c */
2982#if defined(CONFIG_ACPI1)
2983extern int radeon_acpi_init(struct radeon_device *rdev);
2984extern void radeon_acpi_fini(struct radeon_device *rdev);
2985extern bool_Bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev);
2986extern int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
2987 u8 perf_req, bool_Bool advertise);
2988extern int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev);
2989#else
2990static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
2991static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
2992#endif
2993
2994int radeon_cs_packet_parse(struct radeon_cs_parser *p,
2995 struct radeon_cs_packet *pkt,
2996 unsigned idx);
2997bool_Bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
2998void radeon_cs_dump_packet(struct radeon_cs_parser *p,
2999 struct radeon_cs_packet *pkt);
3000int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
3001 struct radeon_bo_list **cs_reloc,
3002 int nomm);
3003int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
3004 uint32_t *vline_start_end,
3005 uint32_t *vline_status);
3006
3007/* interrupt control register helpers */
3008void radeon_irq_kms_set_irq_n_enabled(struct radeon_device *rdev,
3009 u32 reg, u32 mask,
3010 bool_Bool enable, const char *name,
3011 unsigned n);
3012
3013#include "radeon_object.h"
3014
3015#endif

/usr/src/sys/dev/pci/drm/include/linux/firmware.h

1/* Public domain. */
2
3#ifndef _LINUX_FIRMWARE_H
4#define _LINUX_FIRMWARE_H
5
6#include <sys/types.h>
7#include <sys/malloc.h>
8#include <sys/device.h>
9#include <linux/types.h>
10#include <linux/gfp.h>
11
12#ifndef __DECONST
13#define __DECONST(type, var)((type)(__uintptr_t)(const void *)(var)) ((type)(__uintptr_t)(const void *)(var))
14#endif
15
16struct firmware {
17 size_t size;
18 const u8 *data;
19};
20
21static inline int
22request_firmware(const struct firmware **fw, const char *name,
23 struct device *device)
24{
25 int r;
26 struct firmware *f = malloc(sizeof(struct firmware), M_DRM145,
27 M_WAITOK0x0001 | M_ZERO0x0008);
28 r = loadfirmware(name, __DECONST(u_char **, &f->data)((u_char **)(__uintptr_t)(const void *)(&f->data)), &f->size);
29 if (r != 0) {
306
Assuming 'r' is not equal to 0
307
Taking true branch
30 free(f, M_DRM145, sizeof(struct firmware));
31 *fw = NULL((void *)0);
308
Null pointer value stored to field 'me_fw'
32 return -r;
33 } else {
34 *fw = f;
35 return 0;
36 }
37}
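
This failure path is the source of the reported warning: on error the buffer is freed and NULL is stored through *fw, so a caller that later reads a field such as me_fw->size (as r100.c does at line 1052) dereferences a null pointer. A minimal caller-side guard, sketched under the assumption that the firmware name and the rdev->dev/me_fw field names follow the radeon microcode-loading code:

static int example_load_me_firmware(struct radeon_device *rdev,
                                    const char *fw_name)
{
        int err;

        err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
        if (err) {
                /* rdev->me_fw is NULL on this path; do not touch ->size. */
                return err;
        }
        /* Only now is rdev->me_fw->size safe to read. */
        if (rdev->me_fw->size % 8) {
                release_firmware(rdev->me_fw);
                rdev->me_fw = NULL;
                return -EINVAL;
        }
        return 0;
}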
38
39static inline int
40request_firmware_direct(const struct firmware **fw, const char *name,
41 struct device *device)
42{
43 return request_firmware(fw, name, device);
44}
45
46#define request_firmware_nowait(a, b, c, d, e, f, g)-22 -EINVAL22
47
48static inline void
49release_firmware(const struct firmware *fw)
50{
51 if (fw)
52 free(__DECONST(u_char *, fw->data)((u_char *)(__uintptr_t)(const void *)(fw->data)), M_DEVBUF2, fw->size);
53 free(__DECONST(struct firmware *, fw)((struct firmware *)(__uintptr_t)(const void *)(fw)), M_DRM145, sizeof(*fw));
54}
55
56#endif