Bug Summary

File: dev/pci/drm/amd/amdgpu/amdgpu_atom.c
Warning: line 963, column 2
Value stored to 'dst' is never read

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name amdgpu_atom.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/drm/amd/amdgpu/amdgpu_atom.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#include <linux/module.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <asm/unaligned.h>
29
30#include <drm/drm_util.h>
31
32#define ATOM_DEBUG
33
34#include "atom.h"
35#include "atom-names.h"
36#include "atom-bits.h"
37#include "amdgpu.h"
38
/* Jump-condition codes used by the CMP/JUMP opcodes below. */
#define ATOM_COND_ABOVE		0
#define ATOM_COND_ABOVEOREQUAL	1
#define ATOM_COND_ALWAYS	2
#define ATOM_COND_BELOW		3
#define ATOM_COND_BELOWOREQUAL	4
#define ATOM_COND_EQUAL		5
#define ATOM_COND_NOTEQUAL	6

/* I/O port selectors for the SET_ATI_PORT opcode. */
#define ATOM_PORT_ATI		0
#define ATOM_PORT_PCI		1
#define ATOM_PORT_SYSIO		2

/* Delay units for the DELAY opcode. */
#define ATOM_UNIT_MICROSEC	0
#define ATOM_UNIT_MILLISEC	1

#define PLL_INDEX		2
#define PLL_DATA		3

/* Abort a table if it loops on the same jump target this long. */
#define ATOM_CMD_TIMEOUT_SEC	20
58
59typedef struct {
60 struct atom_context *ctx;
61 uint32_t *ps, *ws;
62 int ps_shift;
63 uint16_t start;
64 unsigned last_jump;
65 unsigned long last_jump_jiffies;
66 bool_Bool abort;
67} atom_exec_context;
68
69int amdgpu_atom_debug = 0;
70static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
71int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
72
73static uint32_t atom_arg_mask[8] =
74 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
750xFF000000 };
76static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
77
78static int atom_dst_to_src[8][4] = {
79 /* translate destination alignment field to the source alignment encoding */
80 {0, 0, 0, 0},
81 {1, 2, 3, 0},
82 {1, 2, 3, 0},
83 {1, 2, 3, 0},
84 {4, 5, 6, 7},
85 {4, 5, 6, 7},
86 {4, 5, 6, 7},
87 {4, 5, 6, 7},
88};
89static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
90
91static int debug_depth = 0;
92#ifdef ATOM_DEBUG
/* Emit n spaces to the log; used by SDEBUG to indent by call depth. */
static void debug_print_spaces(int n)
{
	while (n--)
		printk(" ");
}
98
99#ifdef DEBUG
100#undef DEBUG
101#endif
102
103#define DEBUG(...)do if (amdgpu_atom_debug) { printk("\0017" ...); } while (0) do if (amdgpu_atom_debug) { printk(KERN_DEBUG"\0017" __VA_ARGS__); } while (0)
104#define SDEBUG(...)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(...); } while (0)
do if (amdgpu_atom_debug) { printk(KERN_DEBUG"\0017"); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
105#else
106#define DEBUG(...)do if (amdgpu_atom_debug) { printk("\0017" ...); } while (0) do { } while (0)
107#define SDEBUG(...)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(...); } while (0)
do { } while (0)
108#endif
109
110static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
111 uint32_t index, uint32_t data)
112{
113 uint32_t temp = 0xCDCDCDCD;
114
115 while (1)
116 switch (CU8(base)get_u8(ctx->bios, (base))) {
117 case ATOM_IIO_NOP0:
118 base++;
119 break;
120 case ATOM_IIO_READ2:
121 temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1)get_u16(ctx->bios, (base + 1)));
122 base += 3;
123 break;
124 case ATOM_IIO_WRITE3:
125 ctx->card->ioreg_write(ctx->card, CU16(base + 1)get_u16(ctx->bios, (base + 1)), temp);
126 base += 3;
127 break;
128 case ATOM_IIO_CLEAR4:
129 temp &=
130 ~((0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1)))) <<
131 CU8(base + 2)get_u8(ctx->bios, (base + 2)));
132 base += 3;
133 break;
134 case ATOM_IIO_SET5:
135 temp |=
136 (0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1)))) << CU8(base +get_u8(ctx->bios, (base + 2))
137 2)get_u8(ctx->bios, (base + 2));
138 base += 3;
139 break;
140 case ATOM_IIO_MOVE_INDEX6:
141 temp &=
142 ~((0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1)))) <<
143 CU8(base + 3)get_u8(ctx->bios, (base + 3)));
144 temp |=
145 ((index >> CU8(base + 2)get_u8(ctx->bios, (base + 2))) &
146 (0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1))))) << CU8(base +get_u8(ctx->bios, (base + 3))
147 3)get_u8(ctx->bios, (base + 3));
148 base += 4;
149 break;
150 case ATOM_IIO_MOVE_DATA8:
151 temp &=
152 ~((0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1)))) <<
153 CU8(base + 3)get_u8(ctx->bios, (base + 3)));
154 temp |=
155 ((data >> CU8(base + 2)get_u8(ctx->bios, (base + 2))) &
156 (0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1))))) << CU8(base +get_u8(ctx->bios, (base + 3))
157 3)get_u8(ctx->bios, (base + 3));
158 base += 4;
159 break;
160 case ATOM_IIO_MOVE_ATTR7:
161 temp &=
162 ~((0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1)))) <<
163 CU8(base + 3)get_u8(ctx->bios, (base + 3)));
164 temp |=
165 ((ctx->
166 io_attr >> CU8(base + 2)get_u8(ctx->bios, (base + 2))) & (0xFFFFFFFF >> (32 -
167 CU8get_u8(ctx->bios, (base + 1))
168 (baseget_u8(ctx->bios, (base + 1))
169 +get_u8(ctx->bios, (base + 1))
170 1)get_u8(ctx->bios, (base + 1)))))
171 << CU8(base + 3)get_u8(ctx->bios, (base + 3));
172 base += 4;
173 break;
174 case ATOM_IIO_END9:
175 return temp;
176 default:
177 pr_info("Unknown IIO opcode\n")do { } while(0);
178 return 0;
179 }
180}
181
182static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
183 int *ptr, uint32_t *saved, int print)
184{
185 uint32_t idx, val = 0xCDCDCDCD, align, arg;
186 struct atom_context *gctx = ctx->ctx;
187 arg = attr & 7;
188 align = (attr >> 3) & 7;
189 switch (arg) {
190 case ATOM_ARG_REG0:
191 idx = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
192 (*ptr) += 2;
193 if (print)
194 DEBUG("REG[0x%04X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "REG[0x%04X]", idx
); } while (0)
;
195 idx += gctx->reg_block;
196 switch (gctx->io_mode) {
197 case ATOM_IO_MM0:
198 val = gctx->card->reg_read(gctx->card, idx);
199 break;
200 case ATOM_IO_PCI1:
201 pr_info("PCI registers are not implemented\n")do { } while(0);
202 return 0;
203 case ATOM_IO_SYSIO2:
204 pr_info("SYSIO registers are not implemented\n")do { } while(0);
205 return 0;
206 default:
207 if (!(gctx->io_mode & 0x80)) {
208 pr_info("Bad IO mode\n")do { } while(0);
209 return 0;
210 }
211 if (!gctx->iio[gctx->io_mode & 0x7F]) {
212 pr_info("Undefined indirect IO read method %d\n",do { } while(0)
213 gctx->io_mode & 0x7F)do { } while(0);
214 return 0;
215 }
216 val =
217 atom_iio_execute(gctx,
218 gctx->iio[gctx->io_mode & 0x7F],
219 idx, 0);
220 }
221 break;
222 case ATOM_ARG_PS1:
223 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
224 (*ptr)++;
225 /* get_unaligned_le32 avoids unaligned accesses from atombios
226 * tables, noticed on a DEC Alpha. */
227 val = get_unaligned_le32((u32 *)&ctx->ps[idx])((__uint32_t)(*(__uint32_t *)((u32 *)&ctx->ps[idx])));
228 if (print)
229 DEBUG("PS[0x%02X,0x%04X]", idx, val)do if (amdgpu_atom_debug) { printk("\0017" "PS[0x%02X,0x%04X]"
, idx, val); } while (0)
;
230 break;
231 case ATOM_ARG_WS2:
232 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
233 (*ptr)++;
234 if (print)
235 DEBUG("WS[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "WS[0x%02X]", idx)
; } while (0)
;
236 switch (idx) {
237 case ATOM_WS_QUOTIENT0x40:
238 val = gctx->divmul[0];
239 break;
240 case ATOM_WS_REMAINDER0x41:
241 val = gctx->divmul[1];
242 break;
243 case ATOM_WS_DATAPTR0x42:
244 val = gctx->data_block;
245 break;
246 case ATOM_WS_SHIFT0x43:
247 val = gctx->shift;
248 break;
249 case ATOM_WS_OR_MASK0x44:
250 val = 1 << gctx->shift;
251 break;
252 case ATOM_WS_AND_MASK0x45:
253 val = ~(1 << gctx->shift);
254 break;
255 case ATOM_WS_FB_WINDOW0x46:
256 val = gctx->fb_base;
257 break;
258 case ATOM_WS_ATTRIBUTES0x47:
259 val = gctx->io_attr;
260 break;
261 case ATOM_WS_REGPTR0x48:
262 val = gctx->reg_block;
263 break;
264 default:
265 val = ctx->ws[idx];
266 }
267 break;
268 case ATOM_ARG_ID4:
269 idx = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
270 (*ptr) += 2;
271 if (print) {
272 if (gctx->data_block)
273 DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block)do if (amdgpu_atom_debug) { printk("\0017" "ID[0x%04X+%04X]",
idx, gctx->data_block); } while (0)
;
274 else
275 DEBUG("ID[0x%04X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "ID[0x%04X]", idx)
; } while (0)
;
276 }
277 val = U32(idx + gctx->data_block)get_u32(ctx->ctx->bios, (idx + gctx->data_block));
278 break;
279 case ATOM_ARG_FB3:
280 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
281 (*ptr)++;
282 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
283 DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",__drm_err("ATOM: fb read beyond scratch region: %d vs. %d\n",
gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)
284 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)__drm_err("ATOM: fb read beyond scratch region: %d vs. %d\n",
gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)
;
285 val = 0;
286 } else
287 val = gctx->scratch[(gctx->fb_base / 4) + idx];
288 if (print)
289 DEBUG("FB[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "FB[0x%02X]", idx)
; } while (0)
;
290 break;
291 case ATOM_ARG_IMM5:
292 switch (align) {
293 case ATOM_SRC_DWORD0:
294 val = U32(*ptr)get_u32(ctx->ctx->bios, (*ptr));
295 (*ptr) += 4;
296 if (print)
297 DEBUG("IMM 0x%08X\n", val)do if (amdgpu_atom_debug) { printk("\0017" "IMM 0x%08X\n", val
); } while (0)
;
298 return val;
299 case ATOM_SRC_WORD01:
300 case ATOM_SRC_WORD82:
301 case ATOM_SRC_WORD163:
302 val = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
303 (*ptr) += 2;
304 if (print)
305 DEBUG("IMM 0x%04X\n", val)do if (amdgpu_atom_debug) { printk("\0017" "IMM 0x%04X\n", val
); } while (0)
;
306 return val;
307 case ATOM_SRC_BYTE04:
308 case ATOM_SRC_BYTE85:
309 case ATOM_SRC_BYTE166:
310 case ATOM_SRC_BYTE247:
311 val = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
312 (*ptr)++;
313 if (print)
314 DEBUG("IMM 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017" "IMM 0x%02X\n", val
); } while (0)
;
315 return val;
316 }
317 return 0;
318 case ATOM_ARG_PLL6:
319 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
320 (*ptr)++;
321 if (print)
322 DEBUG("PLL[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "PLL[0x%02X]", idx
); } while (0)
;
323 val = gctx->card->pll_read(gctx->card, idx);
324 break;
325 case ATOM_ARG_MC7:
326 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
327 (*ptr)++;
328 if (print)
329 DEBUG("MC[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "MC[0x%02X]", idx)
; } while (0)
;
330 val = gctx->card->mc_read(gctx->card, idx);
331 break;
332 }
333 if (saved)
334 *saved = val;
335 val &= atom_arg_mask[align];
336 val >>= atom_arg_shift[align];
337 if (print)
338 switch (align) {
339 case ATOM_SRC_DWORD0:
340 DEBUG(".[31:0] -> 0x%08X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:0] -> 0x%08X\n"
, val); } while (0)
;
341 break;
342 case ATOM_SRC_WORD01:
343 DEBUG(".[15:0] -> 0x%04X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[15:0] -> 0x%04X\n"
, val); } while (0)
;
344 break;
345 case ATOM_SRC_WORD82:
346 DEBUG(".[23:8] -> 0x%04X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[23:8] -> 0x%04X\n"
, val); } while (0)
;
347 break;
348 case ATOM_SRC_WORD163:
349 DEBUG(".[31:16] -> 0x%04X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:16] -> 0x%04X\n"
, val); } while (0)
;
350 break;
351 case ATOM_SRC_BYTE04:
352 DEBUG(".[7:0] -> 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[7:0] -> 0x%02X\n"
, val); } while (0)
;
353 break;
354 case ATOM_SRC_BYTE85:
355 DEBUG(".[15:8] -> 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[15:8] -> 0x%02X\n"
, val); } while (0)
;
356 break;
357 case ATOM_SRC_BYTE166:
358 DEBUG(".[23:16] -> 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[23:16] -> 0x%02X\n"
, val); } while (0)
;
359 break;
360 case ATOM_SRC_BYTE247:
361 DEBUG(".[31:24] -> 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:24] -> 0x%02X\n"
, val); } while (0)
;
362 break;
363 }
364 return val;
365}
366
367static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
368{
369 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
370 switch (arg) {
371 case ATOM_ARG_REG0:
372 case ATOM_ARG_ID4:
373 (*ptr) += 2;
374 break;
375 case ATOM_ARG_PLL6:
376 case ATOM_ARG_MC7:
377 case ATOM_ARG_PS1:
378 case ATOM_ARG_WS2:
379 case ATOM_ARG_FB3:
380 (*ptr)++;
381 break;
382 case ATOM_ARG_IMM5:
383 switch (align) {
384 case ATOM_SRC_DWORD0:
385 (*ptr) += 4;
386 return;
387 case ATOM_SRC_WORD01:
388 case ATOM_SRC_WORD82:
389 case ATOM_SRC_WORD163:
390 (*ptr) += 2;
391 return;
392 case ATOM_SRC_BYTE04:
393 case ATOM_SRC_BYTE85:
394 case ATOM_SRC_BYTE166:
395 case ATOM_SRC_BYTE247:
396 (*ptr)++;
397 return;
398 }
399 return;
400 }
401}
402
403static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
404{
405 return atom_get_src_int(ctx, attr, ptr, NULL((void *)0), 1);
406}
407
408static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
409{
410 uint32_t val = 0xCDCDCDCD;
411
412 switch (align) {
413 case ATOM_SRC_DWORD0:
414 val = U32(*ptr)get_u32(ctx->ctx->bios, (*ptr));
415 (*ptr) += 4;
416 break;
417 case ATOM_SRC_WORD01:
418 case ATOM_SRC_WORD82:
419 case ATOM_SRC_WORD163:
420 val = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
421 (*ptr) += 2;
422 break;
423 case ATOM_SRC_BYTE04:
424 case ATOM_SRC_BYTE85:
425 case ATOM_SRC_BYTE166:
426 case ATOM_SRC_BYTE247:
427 val = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
428 (*ptr)++;
429 break;
430 }
431 return val;
432}
433
434static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
435 int *ptr, uint32_t *saved, int print)
436{
437 return atom_get_src_int(ctx,
438 arg | atom_dst_to_src[(attr >> 3) &
439 7][(attr >> 6) & 3] << 3,
440 ptr, saved, print);
441}
442
443static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
444{
445 atom_skip_src_int(ctx,
446 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
447 3] << 3, ptr);
448}
449
450static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
451 int *ptr, uint32_t val, uint32_t saved)
452{
453 uint32_t align =
454 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
455 val, idx;
456 struct atom_context *gctx = ctx->ctx;
457 old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
458 val <<= atom_arg_shift[align];
459 val &= atom_arg_mask[align];
460 saved &= ~atom_arg_mask[align];
461 val |= saved;
462 switch (arg) {
463 case ATOM_ARG_REG0:
464 idx = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
465 (*ptr) += 2;
466 DEBUG("REG[0x%04X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "REG[0x%04X]", idx
); } while (0)
;
467 idx += gctx->reg_block;
468 switch (gctx->io_mode) {
469 case ATOM_IO_MM0:
470 if (idx == 0)
471 gctx->card->reg_write(gctx->card, idx,
472 val << 2);
473 else
474 gctx->card->reg_write(gctx->card, idx, val);
475 break;
476 case ATOM_IO_PCI1:
477 pr_info("PCI registers are not implemented\n")do { } while(0);
478 return;
479 case ATOM_IO_SYSIO2:
480 pr_info("SYSIO registers are not implemented\n")do { } while(0);
481 return;
482 default:
483 if (!(gctx->io_mode & 0x80)) {
484 pr_info("Bad IO mode\n")do { } while(0);
485 return;
486 }
487 if (!gctx->iio[gctx->io_mode & 0xFF]) {
488 pr_info("Undefined indirect IO write method %d\n",do { } while(0)
489 gctx->io_mode & 0x7F)do { } while(0);
490 return;
491 }
492 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
493 idx, val);
494 }
495 break;
496 case ATOM_ARG_PS1:
497 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
498 (*ptr)++;
499 DEBUG("PS[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "PS[0x%02X]", idx)
; } while (0)
;
500 ctx->ps[idx] = cpu_to_le32(val)((__uint32_t)(val));
501 break;
502 case ATOM_ARG_WS2:
503 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
504 (*ptr)++;
505 DEBUG("WS[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "WS[0x%02X]", idx)
; } while (0)
;
506 switch (idx) {
507 case ATOM_WS_QUOTIENT0x40:
508 gctx->divmul[0] = val;
509 break;
510 case ATOM_WS_REMAINDER0x41:
511 gctx->divmul[1] = val;
512 break;
513 case ATOM_WS_DATAPTR0x42:
514 gctx->data_block = val;
515 break;
516 case ATOM_WS_SHIFT0x43:
517 gctx->shift = val;
518 break;
519 case ATOM_WS_OR_MASK0x44:
520 case ATOM_WS_AND_MASK0x45:
521 break;
522 case ATOM_WS_FB_WINDOW0x46:
523 gctx->fb_base = val;
524 break;
525 case ATOM_WS_ATTRIBUTES0x47:
526 gctx->io_attr = val;
527 break;
528 case ATOM_WS_REGPTR0x48:
529 gctx->reg_block = val;
530 break;
531 default:
532 ctx->ws[idx] = val;
533 }
534 break;
535 case ATOM_ARG_FB3:
536 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
537 (*ptr)++;
538 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
539 DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",__drm_err("ATOM: fb write beyond scratch region: %d vs. %d\n"
, gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)
540 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)__drm_err("ATOM: fb write beyond scratch region: %d vs. %d\n"
, gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)
;
541 } else
542 gctx->scratch[(gctx->fb_base / 4) + idx] = val;
543 DEBUG("FB[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "FB[0x%02X]", idx)
; } while (0)
;
544 break;
545 case ATOM_ARG_PLL6:
546 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
547 (*ptr)++;
548 DEBUG("PLL[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "PLL[0x%02X]", idx
); } while (0)
;
549 gctx->card->pll_write(gctx->card, idx, val);
550 break;
551 case ATOM_ARG_MC7:
552 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
553 (*ptr)++;
554 DEBUG("MC[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "MC[0x%02X]", idx)
; } while (0)
;
555 gctx->card->mc_write(gctx->card, idx, val);
556 return;
557 }
558 switch (align) {
559 case ATOM_SRC_DWORD0:
560 DEBUG(".[31:0] <- 0x%08X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:0] <- 0x%08X\n"
, old_val); } while (0)
;
561 break;
562 case ATOM_SRC_WORD01:
563 DEBUG(".[15:0] <- 0x%04X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[15:0] <- 0x%04X\n"
, old_val); } while (0)
;
564 break;
565 case ATOM_SRC_WORD82:
566 DEBUG(".[23:8] <- 0x%04X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[23:8] <- 0x%04X\n"
, old_val); } while (0)
;
567 break;
568 case ATOM_SRC_WORD163:
569 DEBUG(".[31:16] <- 0x%04X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:16] <- 0x%04X\n"
, old_val); } while (0)
;
570 break;
571 case ATOM_SRC_BYTE04:
572 DEBUG(".[7:0] <- 0x%02X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[7:0] <- 0x%02X\n"
, old_val); } while (0)
;
573 break;
574 case ATOM_SRC_BYTE85:
575 DEBUG(".[15:8] <- 0x%02X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[15:8] <- 0x%02X\n"
, old_val); } while (0)
;
576 break;
577 case ATOM_SRC_BYTE166:
578 DEBUG(".[23:16] <- 0x%02X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[23:16] <- 0x%02X\n"
, old_val); } while (0)
;
579 break;
580 case ATOM_SRC_BYTE247:
581 DEBUG(".[31:24] <- 0x%02X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:24] <- 0x%02X\n"
, old_val); } while (0)
;
582 break;
583 }
584}
585
586static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
587{
588 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
589 uint32_t dst, src, saved;
590 int dptr = *ptr;
591 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
592 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
593 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
594 src = atom_get_src(ctx, attr, ptr);
595 dst += src;
596 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
597 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
598}
599
600static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
601{
602 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
603 uint32_t dst, src, saved;
604 int dptr = *ptr;
605 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
606 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
607 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
608 src = atom_get_src(ctx, attr, ptr);
609 dst &= src;
610 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
611 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
612}
613
614static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
615{
616 printk("ATOM BIOS beeped!\n");
617}
618
619static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
620{
621 int idx = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
622 int r = 0;
623
624 if (idx < ATOM_TABLE_NAMES_CNT74)
625 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx])do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" table: %d (%s)\n", idx, atom_table_names
[idx]); } while (0)
;
626 else
627 SDEBUG(" table: %d\n", idx)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" table: %d\n", idx); } while (0)
;
628 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)get_u16(ctx->ctx->bios, (ctx->ctx->cmd_table + 4 +
2 * idx))
)
629 r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
630 if (r) {
631 ctx->abort = true1;
632 }
633}
634
635static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
636{
637 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
638 uint32_t saved;
639 int dptr = *ptr;
640 attr &= 0x38;
641 attr |= atom_def_dst[attr >> 3] << 6;
642 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
643 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
644 atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
645}
646
647static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
648{
649 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
650 uint32_t dst, src;
651 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
652 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
653 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
654 src = atom_get_src(ctx, attr, ptr);
655 ctx->ctx->cs_equal = (dst == src);
656 ctx->ctx->cs_above = (dst > src);
657 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" result: %s %s\n", ctx->ctx->cs_equal
? "EQ" : "NE", ctx->ctx->cs_above ? "GT" : "LE"); } while
(0)
658 ctx->ctx->cs_above ? "GT" : "LE")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" result: %s %s\n", ctx->ctx->cs_equal
? "EQ" : "NE", ctx->ctx->cs_above ? "GT" : "LE"); } while
(0)
;
659}
660
661static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
662{
663 unsigned count = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
664 SDEBUG(" count: %d\n", count)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" count: %d\n", count); } while (0)
;
665 if (arg == ATOM_UNIT_MICROSEC0)
666 udelay(count);
667 else if (!drm_can_sleep())
668 mdelay(count);
669 else
670 drm_msleep(count)mdelay(count);
671}
672
673static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
674{
675 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
676 uint32_t dst, src;
677 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
678 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
679 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
680 src = atom_get_src(ctx, attr, ptr);
681 if (src != 0) {
682 ctx->ctx->divmul[0] = dst / src;
683 ctx->ctx->divmul[1] = dst % src;
684 } else {
685 ctx->ctx->divmul[0] = 0;
686 ctx->ctx->divmul[1] = 0;
687 }
688}
689
690static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
691{
692 uint64_t val64;
693 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
694 uint32_t dst, src;
695 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
696 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
697 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
698 src = atom_get_src(ctx, attr, ptr);
699 if (src != 0) {
700 val64 = dst;
701 val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
702 do_div(val64, src)({ uint32_t __base = (src); uint32_t __rem = ((uint64_t)(val64
)) % __base; (val64) = ((uint64_t)(val64)) / __base; __rem; }
)
;
703 ctx->ctx->divmul[0] = lower_32_bits(val64)((u32)(val64));
704 ctx->ctx->divmul[1] = upper_32_bits(val64)((u32)(((val64) >> 16) >> 16));
705 } else {
706 ctx->ctx->divmul[0] = 0;
707 ctx->ctx->divmul[1] = 0;
708 }
709}
710
711static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
712{
713 /* functionally, a nop */
714}
715
/* Conditional/unconditional jump. The condition is taken from the flags
 * (cs_above/cs_equal) set by a preceding COMPARE/TEST op. Repeated jumps
 * to the same target are timed so a BIOS table spinning in place for more
 * than ATOM_CMD_TIMEOUT_SEC aborts instead of hanging the kernel. */
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG("   taken: %s\n", execute ? "yes" : "no");
	SDEBUG("   target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			/* same target as last time: we may be in a loop,
			 * measure how long we have been spinning */
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC*1000)) {
					DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n",
						  ATOM_CMD_TIMEOUT_SEC);
					/* executor checks this flag and bails out */
					ctx->abort = true;
				}
			} else {
				/* jiffies wrap around we will just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			/* new target: restart the loop timer */
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}
769
770static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
771{
772 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
773 uint32_t dst, mask, src, saved;
774 int dptr = *ptr;
775 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
776 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
777 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
778 SDEBUG(" mask: 0x%08x", mask)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" mask: 0x%08x", mask); } while (0)
;
779 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
780 src = atom_get_src(ctx, attr, ptr);
781 dst &= mask;
782 dst |= src;
783 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
784 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
785}
786
787static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
788{
789 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
790 uint32_t src, saved;
791 int dptr = *ptr;
792 if (((attr >> 3) & 7) != ATOM_SRC_DWORD0)
793 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
794 else {
795 atom_skip_dst(ctx, arg, attr, ptr);
796 saved = 0xCDCDCDCD;
797 }
798 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
799 src = atom_get_src(ctx, attr, ptr);
800 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
801 atom_put_dst(ctx, arg, attr, &dptr, src, saved);
802}
803
804static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
805{
806 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
807 uint32_t dst, src;
808 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
809 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
810 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
811 src = atom_get_src(ctx, attr, ptr);
812 ctx->ctx->divmul[0] = dst * src;
813}
814
815static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
816{
817 uint64_t val64;
818 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
819 uint32_t dst, src;
820 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
821 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
822 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
823 src = atom_get_src(ctx, attr, ptr);
824 val64 = (uint64_t)dst * (uint64_t)src;
825 ctx->ctx->divmul[0] = lower_32_bits(val64)((u32)(val64));
826 ctx->ctx->divmul[1] = upper_32_bits(val64)((u32)(((val64) >> 16) >> 16));
827}
828
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* NOP opcode: intentionally does nothing */
}
833
834static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
835{
836 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
837 uint32_t dst, src, saved;
838 int dptr = *ptr;
839 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
840 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
841 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
842 src = atom_get_src(ctx, attr, ptr);
843 dst |= src;
844 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
845 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
846}
847
848static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
849{
850 uint8_t val = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
851 SDEBUG("POST card output: 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("POST card output: 0x%02X\n", val); } while
(0)
;
852}
853
static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	/* REPEAT opcode is not implemented by this interpreter */
	pr_info("unimplemented!\n");
}
858
static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	/* RESTOREREG opcode is not implemented by this interpreter */
	pr_info("unimplemented!\n");
}
863
static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	/* SAVEREG opcode is not implemented by this interpreter */
	pr_info("unimplemented!\n");
}
868
869static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
870{
871 int idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
872 (*ptr)++;
873 SDEBUG(" block: %d\n", idx)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" block: %d\n", idx); } while (0)
;
874 if (!idx)
875 ctx->ctx->data_block = 0;
876 else if (idx == 255)
877 ctx->ctx->data_block = ctx->start;
878 else
879 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx)get_u16(ctx->ctx->bios, (ctx->ctx->data_table + 4
+ 2 * idx))
;
880 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" base: 0x%04X\n", ctx->ctx->data_block
); } while (0)
;
881}
882
883static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
884{
885 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
886 SDEBUG(" fb_base: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" fb_base: "); } while (0)
;
887 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
888}
889
890static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
891{
892 int port;
893 switch (arg) {
894 case ATOM_PORT_ATI0:
895 port = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
896 if (port < ATOM_IO_NAMES_CNT5)
897 SDEBUG(" port: %d (%s)\n", port, atom_io_names[port])do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" port: %d (%s)\n", port, atom_io_names
[port]); } while (0)
;
898 else
899 SDEBUG(" port: %d\n", port)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" port: %d\n", port); } while (0)
;
900 if (!port)
901 ctx->ctx->io_mode = ATOM_IO_MM0;
902 else
903 ctx->ctx->io_mode = ATOM_IO_IIO0x80 | port;
904 (*ptr) += 2;
905 break;
906 case ATOM_PORT_PCI1:
907 ctx->ctx->io_mode = ATOM_IO_PCI1;
908 (*ptr)++;
909 break;
910 case ATOM_PORT_SYSIO2:
911 ctx->ctx->io_mode = ATOM_IO_SYSIO2;
912 (*ptr)++;
913 break;
914 }
915}
916
917static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
918{
919 ctx->ctx->reg_block = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
920 (*ptr) += 2;
921 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" base: 0x%04X\n", ctx->ctx->reg_block
); } while (0)
;
922}
923
924static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
925{
926 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++)), shift;
927 uint32_t saved, dst;
928 int dptr = *ptr;
929 attr &= 0x38;
930 attr |= atom_def_dst[attr >> 3] << 6;
931 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
932 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
933 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE04, ptr);
934 SDEBUG(" shift: %d\n", shift)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" shift: %d\n", shift); } while (0)
;
935 dst <<= shift;
936 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
937 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
938}
939
940static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
941{
942 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++)), shift;
943 uint32_t saved, dst;
944 int dptr = *ptr;
945 attr &= 0x38;
946 attr |= atom_def_dst[attr >> 3] << 6;
947 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
948 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
949 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE04, ptr);
950 SDEBUG(" shift: %d\n", shift)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" shift: %d\n", shift); } while (0)
;
951 dst >>= shift;
952 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
953 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
954}
955
956static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
957{
958 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++)), shift;
959 uint32_t saved, dst;
960 int dptr = *ptr;
961 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
962 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
963 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
Value stored to 'dst' is never read
964 /* op needs to full dst value */
965 dst = saved;
966 shift = atom_get_src(ctx, attr, ptr);
967 SDEBUG(" shift: %d\n", shift)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" shift: %d\n", shift); } while (0)
;
968 dst <<= shift;
969 dst &= atom_arg_mask[dst_align];
970 dst >>= atom_arg_shift[dst_align];
971 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
972 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
973}
974
975static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
976{
977 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++)), shift;
978 uint32_t saved, dst;
979 int dptr = *ptr;
980 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
981 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
982 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
983 /* op needs to full dst value */
984 dst = saved;
985 shift = atom_get_src(ctx, attr, ptr);
986 SDEBUG(" shift: %d\n", shift)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" shift: %d\n", shift); } while (0)
;
987 dst >>= shift;
988 dst &= atom_arg_mask[dst_align];
989 dst >>= atom_arg_shift[dst_align];
990 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
991 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
992}
993
994static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
995{
996 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
997 uint32_t dst, src, saved;
998 int dptr = *ptr;
999 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
1000 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1001 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
1002 src = atom_get_src(ctx, attr, ptr);
1003 dst -= src;
1004 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
1005 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1006}
1007
/* SWITCH op: compare src against a list of immediate cases; jump to the
 * first matching case's target.  The case list is terminated by the
 * 16-bit ATOM_CASE_END marker; each entry starts with ATOM_CASE_MAGIC. */
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;

	SDEBUG("   switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG("   case: ");
			/* case value is an immediate of the same size as src */
			val =
			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
					 ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG("   target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			/* malformed case list: bail out rather than loop */
			pr_info("Bad case\n");
			return;
		}
	/* no case matched: step over the end marker and fall through */
	(*ptr) += 2;
}
1034
1035static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1036{
1037 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
1038 uint32_t dst, src;
1039 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
1040 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
1041 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
1042 src = atom_get_src(ctx, attr, ptr);
1043 ctx->ctx->cs_equal = ((dst & src) == 0);
1044 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" result: %s\n", ctx->ctx->cs_equal
? "EQ" : "NE"); } while (0)
;
1045}
1046
1047static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1048{
1049 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
1050 uint32_t dst, src, saved;
1051 int dptr = *ptr;
1052 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
1053 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1054 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
1055 src = atom_get_src(ctx, attr, ptr);
1056 dst ^= src;
1057 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
1058 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1059}
1060
1061static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1062{
1063 uint8_t val = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
1064 SDEBUG("DEBUG output: 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("DEBUG output: 0x%02X\n", val); } while
(0)
;
1065}
1066
1067static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
1068{
1069 uint16_t val = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
1070 (*ptr) += val + 2;
1071 SDEBUG("PROCESSDS output: 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("PROCESSDS output: 0x%02X\n", val); } while
(0)
;
1072}
1073
/* Opcode dispatch table, indexed by the BIOS opcode byte (1..ATOM_OP_CNT-1).
 * Each entry pairs a handler with an argument that selects the destination
 * address space (REG/PS/WS/FB/PLL/MC), a jump condition, a port type, or a
 * delay unit, depending on the handler.  Entry 0 is intentionally NULL:
 * the executor treats opcode 0 as invalid and stops. */
static struct {
	void (*func) (atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{
	NULL, 0}, {
	atom_op_move, ATOM_ARG_REG}, {
	atom_op_move, ATOM_ARG_PS}, {
	atom_op_move, ATOM_ARG_WS}, {
	atom_op_move, ATOM_ARG_FB}, {
	atom_op_move, ATOM_ARG_PLL}, {
	atom_op_move, ATOM_ARG_MC}, {
	atom_op_and, ATOM_ARG_REG}, {
	atom_op_and, ATOM_ARG_PS}, {
	atom_op_and, ATOM_ARG_WS}, {
	atom_op_and, ATOM_ARG_FB}, {
	atom_op_and, ATOM_ARG_PLL}, {
	atom_op_and, ATOM_ARG_MC}, {
	atom_op_or, ATOM_ARG_REG}, {
	atom_op_or, ATOM_ARG_PS}, {
	atom_op_or, ATOM_ARG_WS}, {
	atom_op_or, ATOM_ARG_FB}, {
	atom_op_or, ATOM_ARG_PLL}, {
	atom_op_or, ATOM_ARG_MC}, {
	atom_op_shift_left, ATOM_ARG_REG}, {
	atom_op_shift_left, ATOM_ARG_PS}, {
	atom_op_shift_left, ATOM_ARG_WS}, {
	atom_op_shift_left, ATOM_ARG_FB}, {
	atom_op_shift_left, ATOM_ARG_PLL}, {
	atom_op_shift_left, ATOM_ARG_MC}, {
	atom_op_shift_right, ATOM_ARG_REG}, {
	atom_op_shift_right, ATOM_ARG_PS}, {
	atom_op_shift_right, ATOM_ARG_WS}, {
	atom_op_shift_right, ATOM_ARG_FB}, {
	atom_op_shift_right, ATOM_ARG_PLL}, {
	atom_op_shift_right, ATOM_ARG_MC}, {
	atom_op_mul, ATOM_ARG_REG}, {
	atom_op_mul, ATOM_ARG_PS}, {
	atom_op_mul, ATOM_ARG_WS}, {
	atom_op_mul, ATOM_ARG_FB}, {
	atom_op_mul, ATOM_ARG_PLL}, {
	atom_op_mul, ATOM_ARG_MC}, {
	atom_op_div, ATOM_ARG_REG}, {
	atom_op_div, ATOM_ARG_PS}, {
	atom_op_div, ATOM_ARG_WS}, {
	atom_op_div, ATOM_ARG_FB}, {
	atom_op_div, ATOM_ARG_PLL}, {
	atom_op_div, ATOM_ARG_MC}, {
	atom_op_add, ATOM_ARG_REG}, {
	atom_op_add, ATOM_ARG_PS}, {
	atom_op_add, ATOM_ARG_WS}, {
	atom_op_add, ATOM_ARG_FB}, {
	atom_op_add, ATOM_ARG_PLL}, {
	atom_op_add, ATOM_ARG_MC}, {
	atom_op_sub, ATOM_ARG_REG}, {
	atom_op_sub, ATOM_ARG_PS}, {
	atom_op_sub, ATOM_ARG_WS}, {
	atom_op_sub, ATOM_ARG_FB}, {
	atom_op_sub, ATOM_ARG_PLL}, {
	atom_op_sub, ATOM_ARG_MC}, {
	atom_op_setport, ATOM_PORT_ATI}, {
	atom_op_setport, ATOM_PORT_PCI}, {
	atom_op_setport, ATOM_PORT_SYSIO}, {
	atom_op_setregblock, 0}, {
	atom_op_setfbbase, 0}, {
	atom_op_compare, ATOM_ARG_REG}, {
	atom_op_compare, ATOM_ARG_PS}, {
	atom_op_compare, ATOM_ARG_WS}, {
	atom_op_compare, ATOM_ARG_FB}, {
	atom_op_compare, ATOM_ARG_PLL}, {
	atom_op_compare, ATOM_ARG_MC}, {
	atom_op_switch, 0}, {
	atom_op_jump, ATOM_COND_ALWAYS}, {
	atom_op_jump, ATOM_COND_EQUAL}, {
	atom_op_jump, ATOM_COND_BELOW}, {
	atom_op_jump, ATOM_COND_ABOVE}, {
	atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
	atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
	atom_op_jump, ATOM_COND_NOTEQUAL}, {
	atom_op_test, ATOM_ARG_REG}, {
	atom_op_test, ATOM_ARG_PS}, {
	atom_op_test, ATOM_ARG_WS}, {
	atom_op_test, ATOM_ARG_FB}, {
	atom_op_test, ATOM_ARG_PLL}, {
	atom_op_test, ATOM_ARG_MC}, {
	atom_op_delay, ATOM_UNIT_MILLISEC}, {
	atom_op_delay, ATOM_UNIT_MICROSEC}, {
	atom_op_calltable, 0}, {
	atom_op_repeat, 0}, {
	atom_op_clear, ATOM_ARG_REG}, {
	atom_op_clear, ATOM_ARG_PS}, {
	atom_op_clear, ATOM_ARG_WS}, {
	atom_op_clear, ATOM_ARG_FB}, {
	atom_op_clear, ATOM_ARG_PLL}, {
	atom_op_clear, ATOM_ARG_MC}, {
	atom_op_nop, 0}, {
	atom_op_eot, 0}, {
	atom_op_mask, ATOM_ARG_REG}, {
	atom_op_mask, ATOM_ARG_PS}, {
	atom_op_mask, ATOM_ARG_WS}, {
	atom_op_mask, ATOM_ARG_FB}, {
	atom_op_mask, ATOM_ARG_PLL}, {
	atom_op_mask, ATOM_ARG_MC}, {
	atom_op_postcard, 0}, {
	atom_op_beep, 0}, {
	atom_op_savereg, 0}, {
	atom_op_restorereg, 0}, {
	atom_op_setdatablock, 0}, {
	atom_op_xor, ATOM_ARG_REG}, {
	atom_op_xor, ATOM_ARG_PS}, {
	atom_op_xor, ATOM_ARG_WS}, {
	atom_op_xor, ATOM_ARG_FB}, {
	atom_op_xor, ATOM_ARG_PLL}, {
	atom_op_xor, ATOM_ARG_MC}, {
	atom_op_shl, ATOM_ARG_REG}, {
	atom_op_shl, ATOM_ARG_PS}, {
	atom_op_shl, ATOM_ARG_WS}, {
	atom_op_shl, ATOM_ARG_FB}, {
	atom_op_shl, ATOM_ARG_PLL}, {
	atom_op_shl, ATOM_ARG_MC}, {
	atom_op_shr, ATOM_ARG_REG}, {
	atom_op_shr, ATOM_ARG_PS}, {
	atom_op_shr, ATOM_ARG_WS}, {
	atom_op_shr, ATOM_ARG_FB}, {
	atom_op_shr, ATOM_ARG_PLL}, {
	atom_op_shr, ATOM_ARG_MC}, {
	atom_op_debug, 0}, {
	atom_op_processds, 0}, {
	atom_op_mul32, ATOM_ARG_PS}, {
	atom_op_mul32, ATOM_ARG_WS}, {
	atom_op_div32, ATOM_ARG_PS}, {
	atom_op_div32, ATOM_ARG_WS},
};
1207
1208static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
1209{
1210 int base = CU16(ctx->cmd_table + 4 + 2 * index)get_u16(ctx->bios, (ctx->cmd_table + 4 + 2 * index));
1211 int len, ws, ps, ptr;
1212 unsigned char op;
1213 atom_exec_context ectx;
1214 int ret = 0;
1215
1216 if (!base)
1217 return -EINVAL22;
1218
1219 len = CU16(base + ATOM_CT_SIZE_PTR)get_u16(ctx->bios, (base + 0));
1220 ws = CU8(base + ATOM_CT_WS_PTR)get_u8(ctx->bios, (base + 4));
1221 ps = CU8(base + ATOM_CT_PS_PTR)get_u8(ctx->bios, (base + 5)) & ATOM_CT_PS_MASK0x7F;
1222 ptr = base + ATOM_CT_CODE_PTR6;
1223
1224 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(">> execute %04X (len %d, WS %d, PS %d)\n"
, base, len, ws, ps); } while (0)
;
1225
1226 ectx.ctx = ctx;
1227 ectx.ps_shift = ps / 4;
1228 ectx.start = base;
1229 ectx.ps = params;
1230 ectx.abort = false0;
1231 ectx.last_jump = 0;
1232 if (ws)
1233 ectx.ws = kcalloc(4, ws, GFP_KERNEL(0x0001 | 0x0004));
1234 else
1235 ectx.ws = NULL((void *)0);
1236
1237 debug_depth++;
1238 while (1) {
1239 op = CU8(ptr++)get_u8(ctx->bios, (ptr++));
1240 if (op < ATOM_OP_NAMES_CNT123)
1241 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("%s @ 0x%04X\n", atom_op_names[op], ptr
- 1); } while (0)
;
1242 else
1243 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("[%d] @ 0x%04X\n", op, ptr - 1); } while
(0)
;
1244 if (ectx.abort) {
1245 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",__drm_err("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n"
, base, len, ws, ps, ptr - 1)
1246 base, len, ws, ps, ptr - 1)__drm_err("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n"
, base, len, ws, ps, ptr - 1)
;
1247 ret = -EINVAL22;
1248 goto free;
1249 }
1250
1251 if (op < ATOM_OP_CNT127 && op > 0)
1252 opcode_table[op].func(&ectx, &ptr,
1253 opcode_table[op].arg);
1254 else
1255 break;
1256
1257 if (op == ATOM_OP_EOT91)
1258 break;
1259 }
1260 debug_depth--;
1261 SDEBUG("<<\n")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("<<\n"); } while (0)
;
1262
1263free:
1264 if (ws)
1265 kfree(ectx.ws);
1266 return ret;
1267}
1268
1269int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1270{
1271 int r;
1272
1273 mutex_lock(&ctx->mutex)rw_enter_write(&ctx->mutex);
1274 /* reset data block */
1275 ctx->data_block = 0;
1276 /* reset reg block */
1277 ctx->reg_block = 0;
1278 /* reset fb window */
1279 ctx->fb_base = 0;
1280 /* reset io mode */
1281 ctx->io_mode = ATOM_IO_MM0;
1282 /* reset divmul */
1283 ctx->divmul[0] = 0;
1284 ctx->divmul[1] = 0;
1285 r = amdgpu_atom_execute_table_locked(ctx, index, params);
1286 mutex_unlock(&ctx->mutex)rw_exit_write(&ctx->mutex);
1287 return r;
1288}
1289
/* byte length of each indirect-IO (IIO) instruction, indexed by opcode */
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1291
/* Build the indirect-IO program index: for each IIO program found at
 * 'base', record its start offset in ctx->iio keyed by the program id.
 * On allocation failure ctx->iio stays NULL; the caller checks for that. */
static void atom_index_iio(struct atom_context *ctx, int base)
{
	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
	if (!ctx->iio)
		return;
	while (CU8(base) == ATOM_IIO_START) {
		/* byte after START is the program id; code begins at base+2 */
		ctx->iio[CU8(base + 1)] = base + 2;
		base += 2;
		/* walk instruction-by-instruction to the END opcode */
		while (CU8(base) != ATOM_IIO_END)
			base += atom_iio_len[CU8(base)];
		/* skip the END instruction (3 bytes) to the next program */
		base += 3;
	}
}
1305
/* Validate a BIOS image and build an atom_context for it.
 * Checks the PC ROM signature, the ATI magic string and the ATOM ROM
 * magic, then records the command/data master table offsets and indexes
 * the indirect-IO programs.  Returns NULL on any validation or
 * allocation failure; the caller owns the returned context and frees it
 * with amdgpu_atom_destroy(). */
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
	int base;
	struct atom_context *ctx =
	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
	char *str;
	u16 idx;

	if (!ctx)
		return NULL;

	ctx->card = card;
	ctx->bios = bios;

	/* a legal ROM image starts with the 0xAA55 PC ROM signature */
	if (CU16(0) != ATOM_BIOS_MAGIC) {
		pr_info("Invalid BIOS magic\n");
		kfree(ctx);
		return NULL;
	}
	if (strncmp
	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
	     strlen(ATOM_ATI_MAGIC))) {
		pr_info("Invalid ATI magic\n");
		kfree(ctx);
		return NULL;
	}

	base = CU16(ATOM_ROM_TABLE_PTR);
	if (strncmp
	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
	     strlen(ATOM_ROM_MAGIC))) {
		pr_info("Invalid ATOM magic\n");
		kfree(ctx);
		return NULL;
	}

	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
	if (!ctx->iio) {
		/* atom_index_iio() failed to allocate its index */
		amdgpu_atom_destroy(ctx);
		return NULL;
	}

	/* part-number string pointer; 0x80 appears to be a fallback offset
	 * when the pointer is absent — TODO confirm against VBIOS spec */
	idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
	if (idx == 0)
		idx = 0x80;

	str = CSTR(idx);
	if (*str != '\0') {
		pr_info("ATOM BIOS: %s\n", str);
		strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
	}


	return ctx;
}
1363
1364int amdgpu_atom_asic_init(struct atom_context *ctx)
1365{
1366 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR)get_u16(ctx->bios, (ctx->data_table + 0xC));
1367 uint32_t ps[16];
1368 int ret;
1369
1370 memset(ps, 0, 64)__builtin_memset((ps), (0), (64));
1371
1372 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR))((__uint32_t)(get_u32(ctx->bios, (hwi + 8))));
1373 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR))((__uint32_t)(get_u32(ctx->bios, (hwi + 0xC))));
1374 if (!ps[0] || !ps[1])
1375 return 1;
1376
1377 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)get_u16(ctx->bios, (ctx->cmd_table + 4 + 2 * 0)))
1378 return 1;
1379 ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT0, ps);
1380 if (ret)
1381 return ret;
1382
1383 memset(ps, 0, 64)__builtin_memset((ps), (0), (64));
1384
1385 return ret;
1386}
1387
1388void amdgpu_atom_destroy(struct atom_context *ctx)
1389{
1390 kfree(ctx->iio);
1391 kfree(ctx);
1392}
1393
1394bool_Bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
1395 uint16_t * size, uint8_t * frev, uint8_t * crev,
1396 uint16_t * data_start)
1397{
1398 int offset = index * 2 + 4;
1399 int idx = CU16(ctx->data_table + offset)get_u16(ctx->bios, (ctx->data_table + offset));
1400 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1401
1402 if (!mdt[index])
1403 return false0;
1404
1405 if (size)
1406 *size = CU16(idx)get_u16(ctx->bios, (idx));
1407 if (frev)
1408 *frev = CU8(idx + 2)get_u8(ctx->bios, (idx + 2));
1409 if (crev)
1410 *crev = CU8(idx + 3)get_u8(ctx->bios, (idx + 3));
1411 *data_start = idx;
1412 return true1;
1413}
1414
1415bool_Bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1416 uint8_t * crev)
1417{
1418 int offset = index * 2 + 4;
1419 int idx = CU16(ctx->cmd_table + offset)get_u16(ctx->bios, (ctx->cmd_table + offset));
1420 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1421
1422 if (!mct[index])
1423 return false0;
1424
1425 if (frev)
1426 *frev = CU8(idx + 2)get_u8(ctx->bios, (idx + 2));
1427 if (crev)
1428 *crev = CU8(idx + 3)get_u8(ctx->bios, (idx + 3));
1429 return true1;
1430}
1431