Bug Summary

File: dev/pci/drm/amd/amdgpu/amdgpu_atom.c
Warning: line 1392, column 2
Value stored to 'str_off' is never read
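
This is the analyzer's dead-store check: the value assigned to 'str_off' at line 1392 of atom_find_str_in_rom() is unconditionally overwritten by the inner for-loop initializer before it can ever be read, so the store has no effect. Below is a minimal standalone reproduction of the flagged pattern, simplified from the function shown further down in the listing (the helper name is hypothetical, not the driver's code):

    #include <string.h>

    /* Scans hay[start..end] for needle, mirroring atom_find_str_in_rom(). */
    static const char *find_str(const char *hay, const char *needle,
                                int start, int end, int maxlen)
    {
            unsigned long str_off;
            unsigned short str_len;

            str_off = 0;    /* dead store: overwritten by the loop below */
            str_len = strnlen(needle, maxlen);

            for (; start <= end; ++start) {
                    for (str_off = 0; str_off < str_len; ++str_off) {
                            if (needle[str_off] != hay[start + str_off])
                                    break;
                    }
                    if (str_off == str_len || needle[str_off] == 0)
                            return hay + start;
            }
            return NULL;
    }

One way to silence the warning would be to drop the redundant 'str_off = 0;' statement and let the for-loop initializer remain the only store; this is a sketch of the diagnosed pattern, not necessarily the change applied upstream.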

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name amdgpu_atom.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/pci/drm/amd/amdgpu/amdgpu_atom.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#include <linux/module.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <linux/string_helpers.h>
29
30#include <asm/unaligned.h>
31
32#include <drm/drm_util.h>
33
34#define ATOM_DEBUG
35
36#include "atomfirmware.h"
37#include "atom.h"
38#include "atom-names.h"
39#include "atom-bits.h"
40#include "amdgpu.h"
41
42#define ATOM_COND_ABOVE 0
43#define ATOM_COND_ABOVEOREQUAL 1
44#define ATOM_COND_ALWAYS 2
45#define ATOM_COND_BELOW 3
46#define ATOM_COND_BELOWOREQUAL 4
47#define ATOM_COND_EQUAL 5
48#define ATOM_COND_NOTEQUAL 6
49
50#define ATOM_PORT_ATI 0
51#define ATOM_PORT_PCI 1
52#define ATOM_PORT_SYSIO 2
53
54#define ATOM_UNIT_MICROSEC 0
55#define ATOM_UNIT_MILLISEC 1
56
57#define PLL_INDEX 2
58#define PLL_DATA 3
59
60#define ATOM_CMD_TIMEOUT_SEC 20
61
62typedef struct {
63 struct atom_context *ctx;
64 uint32_t *ps, *ws;
65 int ps_shift;
66 uint16_t start;
67 unsigned last_jump;
68 unsigned long last_jump_jiffies;
69 bool_Bool abort;
70} atom_exec_context;
71
72int amdgpu_atom_debug;
73static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
74int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
75
76static uint32_t atom_arg_mask[8] =
77 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
78 0xFF000000 };
79static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
80
81static int atom_dst_to_src[8][4] = {
82 /* translate destination alignment field to the source alignment encoding */
83 {0, 0, 0, 0},
84 {1, 2, 3, 0},
85 {1, 2, 3, 0},
86 {1, 2, 3, 0},
87 {4, 5, 6, 7},
88 {4, 5, 6, 7},
89 {4, 5, 6, 7},
90 {4, 5, 6, 7},
91};
92static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
93
94static int debug_depth;
95#ifdef ATOM_DEBUG
96static void debug_print_spaces(int n)
97{
98 while (n--)
99 printk(" ");
100}
101
102#ifdef DEBUG
103#undef DEBUG
104#endif
105
106#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
107#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
108#else
109#define DEBUG(...) do { } while (0)
110#define SDEBUG(...) do { } while (0)
111#endif
112
113static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
114 uint32_t index, uint32_t data)
115{
116 uint32_t temp = 0xCDCDCDCD;
117
118 while (1)
119 switch (CU8(base)get_u8(ctx->bios, (base))) {
120 case ATOM_IIO_NOP0:
121 base++;
122 break;
123 case ATOM_IIO_READ2:
124 temp = ctx->card->reg_read(ctx->card, CU16(base + 1)get_u16(ctx->bios, (base + 1)));
125 base += 3;
126 break;
127 case ATOM_IIO_WRITE3:
128 ctx->card->reg_write(ctx->card, CU16(base + 1)get_u16(ctx->bios, (base + 1)), temp);
129 base += 3;
130 break;
131 case ATOM_IIO_CLEAR4:
132 temp &=
133 ~((0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1)))) <<
134 CU8(base + 2)get_u8(ctx->bios, (base + 2)));
135 base += 3;
136 break;
137 case ATOM_IIO_SET5:
138 temp |=
139 (0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1)))) << CU8(base +get_u8(ctx->bios, (base + 2))
140 2)get_u8(ctx->bios, (base + 2));
141 base += 3;
142 break;
143 case ATOM_IIO_MOVE_INDEX6:
144 temp &=
145 ~((0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1)))) <<
146 CU8(base + 3)get_u8(ctx->bios, (base + 3)));
147 temp |=
148 ((index >> CU8(base + 2)get_u8(ctx->bios, (base + 2))) &
149 (0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1))))) << CU8(base +get_u8(ctx->bios, (base + 3))
150 3)get_u8(ctx->bios, (base + 3));
151 base += 4;
152 break;
153 case ATOM_IIO_MOVE_DATA8:
154 temp &=
155 ~((0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1)))) <<
156 CU8(base + 3)get_u8(ctx->bios, (base + 3)));
157 temp |=
158 ((data >> CU8(base + 2)get_u8(ctx->bios, (base + 2))) &
159 (0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1))))) << CU8(base +get_u8(ctx->bios, (base + 3))
160 3)get_u8(ctx->bios, (base + 3));
161 base += 4;
162 break;
163 case ATOM_IIO_MOVE_ATTR7:
164 temp &=
165 ~((0xFFFFFFFF >> (32 - CU8(base + 1)get_u8(ctx->bios, (base + 1)))) <<
166 CU8(base + 3)get_u8(ctx->bios, (base + 3)));
167 temp |=
168 ((ctx->
169 io_attr >> CU8(base + 2)get_u8(ctx->bios, (base + 2))) & (0xFFFFFFFF >> (32 -
170 CU8
171 (base
172 +
173 1))))
174 << CU8(base + 3)get_u8(ctx->bios, (base + 3));
175 base += 4;
176 break;
177 case ATOM_IIO_END9:
178 return temp;
179 default:
180 pr_info("Unknown IIO opcode\n")do { } while(0);
181 return 0;
182 }
183}
184
185static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
186 int *ptr, uint32_t *saved, int print)
187{
188 uint32_t idx, val = 0xCDCDCDCD, align, arg;
189 struct atom_context *gctx = ctx->ctx;
190 arg = attr & 7;
191 align = (attr >> 3) & 7;
192 switch (arg) {
193 case ATOM_ARG_REG0:
194 idx = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
195 (*ptr) += 2;
196 if (print)
197 DEBUG("REG[0x%04X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "REG[0x%04X]", idx
); } while (0)
;
198 idx += gctx->reg_block;
199 switch (gctx->io_mode) {
200 case ATOM_IO_MM0:
201 val = gctx->card->reg_read(gctx->card, idx);
202 break;
203 case ATOM_IO_PCI1:
204 pr_info("PCI registers are not implemented\n")do { } while(0);
205 return 0;
206 case ATOM_IO_SYSIO2:
207 pr_info("SYSIO registers are not implemented\n")do { } while(0);
208 return 0;
209 default:
210 if (!(gctx->io_mode & 0x80)) {
211 pr_info("Bad IO mode\n")do { } while(0);
212 return 0;
213 }
214 if (!gctx->iio[gctx->io_mode & 0x7F]) {
215 pr_info("Undefined indirect IO read method %d\n",do { } while(0)
216 gctx->io_mode & 0x7F)do { } while(0);
217 return 0;
218 }
219 val =
220 atom_iio_execute(gctx,
221 gctx->iio[gctx->io_mode & 0x7F],
222 idx, 0);
223 }
224 break;
225 case ATOM_ARG_PS1:
226 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
227 (*ptr)++;
228 /* get_unaligned_le32 avoids unaligned accesses from atombios
229 * tables, noticed on a DEC Alpha. */
230 val = get_unaligned_le32((u32 *)&ctx->ps[idx])((__uint32_t)(*(__uint32_t *)((u32 *)&ctx->ps[idx])));
231 if (print)
232 DEBUG("PS[0x%02X,0x%04X]", idx, val)do if (amdgpu_atom_debug) { printk("\0017" "PS[0x%02X,0x%04X]"
, idx, val); } while (0)
;
233 break;
234 case ATOM_ARG_WS2:
235 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
236 (*ptr)++;
237 if (print)
238 DEBUG("WS[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "WS[0x%02X]", idx)
; } while (0)
;
239 switch (idx) {
240 case ATOM_WS_QUOTIENT0x40:
241 val = gctx->divmul[0];
242 break;
243 case ATOM_WS_REMAINDER0x41:
244 val = gctx->divmul[1];
245 break;
246 case ATOM_WS_DATAPTR0x42:
247 val = gctx->data_block;
248 break;
249 case ATOM_WS_SHIFT0x43:
250 val = gctx->shift;
251 break;
252 case ATOM_WS_OR_MASK0x44:
253 val = 1 << gctx->shift;
254 break;
255 case ATOM_WS_AND_MASK0x45:
256 val = ~(1 << gctx->shift);
257 break;
258 case ATOM_WS_FB_WINDOW0x46:
259 val = gctx->fb_base;
260 break;
261 case ATOM_WS_ATTRIBUTES0x47:
262 val = gctx->io_attr;
263 break;
264 case ATOM_WS_REGPTR0x48:
265 val = gctx->reg_block;
266 break;
267 default:
268 val = ctx->ws[idx];
269 }
270 break;
271 case ATOM_ARG_ID4:
272 idx = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
273 (*ptr) += 2;
274 if (print) {
275 if (gctx->data_block)
276 DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block)do if (amdgpu_atom_debug) { printk("\0017" "ID[0x%04X+%04X]",
idx, gctx->data_block); } while (0)
;
277 else
278 DEBUG("ID[0x%04X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "ID[0x%04X]", idx)
; } while (0)
;
279 }
280 val = U32(idx + gctx->data_block)get_u32(ctx->ctx->bios, (idx + gctx->data_block));
281 break;
282 case ATOM_ARG_FB3:
283 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
284 (*ptr)++;
285 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
286 DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",__drm_err("ATOM: fb read beyond scratch region: %d vs. %d\n",
gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)
287 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)__drm_err("ATOM: fb read beyond scratch region: %d vs. %d\n",
gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)
;
288 val = 0;
289 } else
290 val = gctx->scratch[(gctx->fb_base / 4) + idx];
291 if (print)
292 DEBUG("FB[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "FB[0x%02X]", idx)
; } while (0)
;
293 break;
294 case ATOM_ARG_IMM5:
295 switch (align) {
296 case ATOM_SRC_DWORD0:
297 val = U32(*ptr)get_u32(ctx->ctx->bios, (*ptr));
298 (*ptr) += 4;
299 if (print)
300 DEBUG("IMM 0x%08X\n", val)do if (amdgpu_atom_debug) { printk("\0017" "IMM 0x%08X\n", val
); } while (0)
;
301 return val;
302 case ATOM_SRC_WORD01:
303 case ATOM_SRC_WORD82:
304 case ATOM_SRC_WORD163:
305 val = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
306 (*ptr) += 2;
307 if (print)
308 DEBUG("IMM 0x%04X\n", val)do if (amdgpu_atom_debug) { printk("\0017" "IMM 0x%04X\n", val
); } while (0)
;
309 return val;
310 case ATOM_SRC_BYTE04:
311 case ATOM_SRC_BYTE85:
312 case ATOM_SRC_BYTE166:
313 case ATOM_SRC_BYTE247:
314 val = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
315 (*ptr)++;
316 if (print)
317 DEBUG("IMM 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017" "IMM 0x%02X\n", val
); } while (0)
;
318 return val;
319 }
320 return 0;
321 case ATOM_ARG_PLL6:
322 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
323 (*ptr)++;
324 if (print)
325 DEBUG("PLL[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "PLL[0x%02X]", idx
); } while (0)
;
326 val = gctx->card->pll_read(gctx->card, idx);
327 break;
328 case ATOM_ARG_MC7:
329 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
330 (*ptr)++;
331 if (print)
332 DEBUG("MC[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "MC[0x%02X]", idx)
; } while (0)
;
333 val = gctx->card->mc_read(gctx->card, idx);
334 break;
335 }
336 if (saved)
337 *saved = val;
338 val &= atom_arg_mask[align];
339 val >>= atom_arg_shift[align];
340 if (print)
341 switch (align) {
342 case ATOM_SRC_DWORD0:
343 DEBUG(".[31:0] -> 0x%08X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:0] -> 0x%08X\n"
, val); } while (0)
;
344 break;
345 case ATOM_SRC_WORD01:
346 DEBUG(".[15:0] -> 0x%04X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[15:0] -> 0x%04X\n"
, val); } while (0)
;
347 break;
348 case ATOM_SRC_WORD82:
349 DEBUG(".[23:8] -> 0x%04X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[23:8] -> 0x%04X\n"
, val); } while (0)
;
350 break;
351 case ATOM_SRC_WORD163:
352 DEBUG(".[31:16] -> 0x%04X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:16] -> 0x%04X\n"
, val); } while (0)
;
353 break;
354 case ATOM_SRC_BYTE04:
355 DEBUG(".[7:0] -> 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[7:0] -> 0x%02X\n"
, val); } while (0)
;
356 break;
357 case ATOM_SRC_BYTE85:
358 DEBUG(".[15:8] -> 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[15:8] -> 0x%02X\n"
, val); } while (0)
;
359 break;
360 case ATOM_SRC_BYTE166:
361 DEBUG(".[23:16] -> 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[23:16] -> 0x%02X\n"
, val); } while (0)
;
362 break;
363 case ATOM_SRC_BYTE247:
364 DEBUG(".[31:24] -> 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:24] -> 0x%02X\n"
, val); } while (0)
;
365 break;
366 }
367 return val;
368}
369
370static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
371{
372 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
373 switch (arg) {
374 case ATOM_ARG_REG0:
375 case ATOM_ARG_ID4:
376 (*ptr) += 2;
377 break;
378 case ATOM_ARG_PLL6:
379 case ATOM_ARG_MC7:
380 case ATOM_ARG_PS1:
381 case ATOM_ARG_WS2:
382 case ATOM_ARG_FB3:
383 (*ptr)++;
384 break;
385 case ATOM_ARG_IMM5:
386 switch (align) {
387 case ATOM_SRC_DWORD0:
388 (*ptr) += 4;
389 return;
390 case ATOM_SRC_WORD01:
391 case ATOM_SRC_WORD82:
392 case ATOM_SRC_WORD163:
393 (*ptr) += 2;
394 return;
395 case ATOM_SRC_BYTE04:
396 case ATOM_SRC_BYTE85:
397 case ATOM_SRC_BYTE166:
398 case ATOM_SRC_BYTE247:
399 (*ptr)++;
400 return;
401 }
402 return;
403 }
404}
405
406static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
407{
408 return atom_get_src_int(ctx, attr, ptr, NULL((void *)0), 1);
409}
410
411static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
412{
413 uint32_t val = 0xCDCDCDCD;
414
415 switch (align) {
416 case ATOM_SRC_DWORD0:
417 val = U32(*ptr)get_u32(ctx->ctx->bios, (*ptr));
418 (*ptr) += 4;
419 break;
420 case ATOM_SRC_WORD01:
421 case ATOM_SRC_WORD82:
422 case ATOM_SRC_WORD163:
423 val = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
424 (*ptr) += 2;
425 break;
426 case ATOM_SRC_BYTE04:
427 case ATOM_SRC_BYTE85:
428 case ATOM_SRC_BYTE166:
429 case ATOM_SRC_BYTE247:
430 val = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
431 (*ptr)++;
432 break;
433 }
434 return val;
435}
436
437static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
438 int *ptr, uint32_t *saved, int print)
439{
440 return atom_get_src_int(ctx,
441 arg | atom_dst_to_src[(attr >> 3) &
442 7][(attr >> 6) & 3] << 3,
443 ptr, saved, print);
444}
445
446static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
447{
448 atom_skip_src_int(ctx,
449 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
450 3] << 3, ptr);
451}
452
453static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
454 int *ptr, uint32_t val, uint32_t saved)
455{
456 uint32_t align =
457 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
458 val, idx;
459 struct atom_context *gctx = ctx->ctx;
460 old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
461 val <<= atom_arg_shift[align];
462 val &= atom_arg_mask[align];
463 saved &= ~atom_arg_mask[align];
464 val |= saved;
465 switch (arg) {
466 case ATOM_ARG_REG0:
467 idx = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
468 (*ptr) += 2;
469 DEBUG("REG[0x%04X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "REG[0x%04X]", idx
); } while (0)
;
470 idx += gctx->reg_block;
471 switch (gctx->io_mode) {
472 case ATOM_IO_MM0:
473 if (idx == 0)
474 gctx->card->reg_write(gctx->card, idx,
475 val << 2);
476 else
477 gctx->card->reg_write(gctx->card, idx, val);
478 break;
479 case ATOM_IO_PCI1:
480 pr_info("PCI registers are not implemented\n")do { } while(0);
481 return;
482 case ATOM_IO_SYSIO2:
483 pr_info("SYSIO registers are not implemented\n")do { } while(0);
484 return;
485 default:
486 if (!(gctx->io_mode & 0x80)) {
487 pr_info("Bad IO mode\n")do { } while(0);
488 return;
489 }
490 if (!gctx->iio[gctx->io_mode & 0xFF]) {
491 pr_info("Undefined indirect IO write method %d\n",do { } while(0)
492 gctx->io_mode & 0x7F)do { } while(0);
493 return;
494 }
495 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
496 idx, val);
497 }
498 break;
499 case ATOM_ARG_PS1:
500 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
501 (*ptr)++;
502 DEBUG("PS[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "PS[0x%02X]", idx)
; } while (0)
;
503 ctx->ps[idx] = cpu_to_le32(val)((__uint32_t)(val));
504 break;
505 case ATOM_ARG_WS2:
506 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
507 (*ptr)++;
508 DEBUG("WS[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "WS[0x%02X]", idx)
; } while (0)
;
509 switch (idx) {
510 case ATOM_WS_QUOTIENT0x40:
511 gctx->divmul[0] = val;
512 break;
513 case ATOM_WS_REMAINDER0x41:
514 gctx->divmul[1] = val;
515 break;
516 case ATOM_WS_DATAPTR0x42:
517 gctx->data_block = val;
518 break;
519 case ATOM_WS_SHIFT0x43:
520 gctx->shift = val;
521 break;
522 case ATOM_WS_OR_MASK0x44:
523 case ATOM_WS_AND_MASK0x45:
524 break;
525 case ATOM_WS_FB_WINDOW0x46:
526 gctx->fb_base = val;
527 break;
528 case ATOM_WS_ATTRIBUTES0x47:
529 gctx->io_attr = val;
530 break;
531 case ATOM_WS_REGPTR0x48:
532 gctx->reg_block = val;
533 break;
534 default:
535 ctx->ws[idx] = val;
536 }
537 break;
538 case ATOM_ARG_FB3:
539 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
540 (*ptr)++;
541 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
542 DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",__drm_err("ATOM: fb write beyond scratch region: %d vs. %d\n"
, gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)
543 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)__drm_err("ATOM: fb write beyond scratch region: %d vs. %d\n"
, gctx->fb_base + (idx * 4), gctx->scratch_size_bytes)
;
544 } else
545 gctx->scratch[(gctx->fb_base / 4) + idx] = val;
546 DEBUG("FB[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "FB[0x%02X]", idx)
; } while (0)
;
547 break;
548 case ATOM_ARG_PLL6:
549 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
550 (*ptr)++;
551 DEBUG("PLL[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "PLL[0x%02X]", idx
); } while (0)
;
552 gctx->card->pll_write(gctx->card, idx, val);
553 break;
554 case ATOM_ARG_MC7:
555 idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
556 (*ptr)++;
557 DEBUG("MC[0x%02X]", idx)do if (amdgpu_atom_debug) { printk("\0017" "MC[0x%02X]", idx)
; } while (0)
;
558 gctx->card->mc_write(gctx->card, idx, val);
559 return;
560 }
561 switch (align) {
562 case ATOM_SRC_DWORD0:
563 DEBUG(".[31:0] <- 0x%08X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:0] <- 0x%08X\n"
, old_val); } while (0)
;
564 break;
565 case ATOM_SRC_WORD01:
566 DEBUG(".[15:0] <- 0x%04X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[15:0] <- 0x%04X\n"
, old_val); } while (0)
;
567 break;
568 case ATOM_SRC_WORD82:
569 DEBUG(".[23:8] <- 0x%04X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[23:8] <- 0x%04X\n"
, old_val); } while (0)
;
570 break;
571 case ATOM_SRC_WORD163:
572 DEBUG(".[31:16] <- 0x%04X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:16] <- 0x%04X\n"
, old_val); } while (0)
;
573 break;
574 case ATOM_SRC_BYTE04:
575 DEBUG(".[7:0] <- 0x%02X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[7:0] <- 0x%02X\n"
, old_val); } while (0)
;
576 break;
577 case ATOM_SRC_BYTE85:
578 DEBUG(".[15:8] <- 0x%02X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[15:8] <- 0x%02X\n"
, old_val); } while (0)
;
579 break;
580 case ATOM_SRC_BYTE166:
581 DEBUG(".[23:16] <- 0x%02X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[23:16] <- 0x%02X\n"
, old_val); } while (0)
;
582 break;
583 case ATOM_SRC_BYTE247:
584 DEBUG(".[31:24] <- 0x%02X\n", old_val)do if (amdgpu_atom_debug) { printk("\0017" ".[31:24] <- 0x%02X\n"
, old_val); } while (0)
;
585 break;
586 }
587}
588
589static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
590{
591 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
592 uint32_t dst, src, saved;
593 int dptr = *ptr;
594 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
595 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
596 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
597 src = atom_get_src(ctx, attr, ptr);
598 dst += src;
599 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
600 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
601}
602
603static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
604{
605 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
606 uint32_t dst, src, saved;
607 int dptr = *ptr;
608 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
609 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
610 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
611 src = atom_get_src(ctx, attr, ptr);
612 dst &= src;
613 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
614 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
615}
616
617static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
618{
619 printk("ATOM BIOS beeped!\n");
620}
621
622static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
623{
624 int idx = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
625 int r = 0;
626
627 if (idx < ATOM_TABLE_NAMES_CNT74)
628 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx])do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" table: %d (%s)\n", idx, atom_table_names
[idx]); } while (0)
;
629 else
630 SDEBUG(" table: %d\n", idx)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" table: %d\n", idx); } while (0)
;
631 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)get_u16(ctx->ctx->bios, (ctx->ctx->cmd_table + 4 +
2 * idx))
)
632 r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
633 if (r) {
634 ctx->abort = true1;
635 }
636}
637
638static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
639{
640 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
641 uint32_t saved;
642 int dptr = *ptr;
643 attr &= 0x38;
644 attr |= atom_def_dst[attr >> 3] << 6;
645 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
646 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
647 atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
648}
649
650static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
651{
652 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
653 uint32_t dst, src;
654 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
655 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
656 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
657 src = atom_get_src(ctx, attr, ptr);
658 ctx->ctx->cs_equal = (dst == src);
659 ctx->ctx->cs_above = (dst > src);
660 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" result: %s %s\n", ctx->ctx->cs_equal
? "EQ" : "NE", ctx->ctx->cs_above ? "GT" : "LE"); } while
(0)
661 ctx->ctx->cs_above ? "GT" : "LE")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" result: %s %s\n", ctx->ctx->cs_equal
? "EQ" : "NE", ctx->ctx->cs_above ? "GT" : "LE"); } while
(0)
;
662}
663
664static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
665{
666 unsigned count = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
667 SDEBUG(" count: %d\n", count)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" count: %d\n", count); } while (0)
;
668 if (arg == ATOM_UNIT_MICROSEC0)
669 udelay(count);
670 else if (!drm_can_sleep())
671 mdelay(count);
672 else
673 drm_msleep(count)mdelay(count);
674}
675
676static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
677{
678 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
679 uint32_t dst, src;
680 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
681 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
682 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
683 src = atom_get_src(ctx, attr, ptr);
684 if (src != 0) {
685 ctx->ctx->divmul[0] = dst / src;
686 ctx->ctx->divmul[1] = dst % src;
687 } else {
688 ctx->ctx->divmul[0] = 0;
689 ctx->ctx->divmul[1] = 0;
690 }
691}
692
693static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
694{
695 uint64_t val64;
696 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
697 uint32_t dst, src;
698 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
699 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
700 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
701 src = atom_get_src(ctx, attr, ptr);
702 if (src != 0) {
703 val64 = dst;
704 val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
705 do_div(val64, src)({ uint32_t __base = (src); uint32_t __rem = ((uint64_t)(val64
)) % __base; (val64) = ((uint64_t)(val64)) / __base; __rem; }
)
;
706 ctx->ctx->divmul[0] = lower_32_bits(val64)((u32)(val64));
707 ctx->ctx->divmul[1] = upper_32_bits(val64)((u32)(((val64) >> 16) >> 16));
708 } else {
709 ctx->ctx->divmul[0] = 0;
710 ctx->ctx->divmul[1] = 0;
711 }
712}
713
714static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
715{
716 /* functionally, a nop */
717}
718
719static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
720{
721 int execute = 0, target = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
722 unsigned long cjiffies;
723
724 (*ptr) += 2;
725 switch (arg) {
726 case ATOM_COND_ABOVE0:
727 execute = ctx->ctx->cs_above;
728 break;
729 case ATOM_COND_ABOVEOREQUAL1:
730 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
731 break;
732 case ATOM_COND_ALWAYS2:
733 execute = 1;
734 break;
735 case ATOM_COND_BELOW3:
736 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
737 break;
738 case ATOM_COND_BELOWOREQUAL4:
739 execute = !ctx->ctx->cs_above;
740 break;
741 case ATOM_COND_EQUAL5:
742 execute = ctx->ctx->cs_equal;
743 break;
744 case ATOM_COND_NOTEQUAL6:
745 execute = !ctx->ctx->cs_equal;
746 break;
747 }
748 if (arg != ATOM_COND_ALWAYS2)
749 SDEBUG(" taken: %s\n", str_yes_no(execute))do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" taken: %s\n", str_yes_no(execute));
} while (0)
;
750 SDEBUG(" target: 0x%04X\n", target)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" target: 0x%04X\n", target); } while
(0)
;
751 if (execute) {
752 if (ctx->last_jump == (ctx->start + target)) {
753 cjiffies = jiffies;
754 if (time_after(cjiffies, ctx->last_jump_jiffies)) {
755 cjiffies -= ctx->last_jump_jiffies;
756 if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC20*1000)) {
757 DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n",__drm_err("atombios stuck in loop for more than %dsecs aborting\n"
, 20)
758 ATOM_CMD_TIMEOUT_SEC)__drm_err("atombios stuck in loop for more than %dsecs aborting\n"
, 20)
;
759 ctx->abort = true1;
760 }
761 } else {
762 /* jiffies wrap around we will just wait a little longer */
763 ctx->last_jump_jiffies = jiffies;
764 }
765 } else {
766 ctx->last_jump = ctx->start + target;
767 ctx->last_jump_jiffies = jiffies;
768 }
769 *ptr = ctx->start + target;
770 }
771}
772
773static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
774{
775 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
776 uint32_t dst, mask, src, saved;
777 int dptr = *ptr;
778 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
779 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
780 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
781 SDEBUG(" mask: 0x%08x", mask)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" mask: 0x%08x", mask); } while (0)
;
782 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
783 src = atom_get_src(ctx, attr, ptr);
784 dst &= mask;
785 dst |= src;
786 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
787 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
788}
789
790static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
791{
792 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
793 uint32_t src, saved;
794 int dptr = *ptr;
795 if (((attr >> 3) & 7) != ATOM_SRC_DWORD0)
796 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
797 else {
798 atom_skip_dst(ctx, arg, attr, ptr);
799 saved = 0xCDCDCDCD;
800 }
801 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
802 src = atom_get_src(ctx, attr, ptr);
803 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
804 atom_put_dst(ctx, arg, attr, &dptr, src, saved);
805}
806
807static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
808{
809 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
810 uint32_t dst, src;
811 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
812 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
813 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
814 src = atom_get_src(ctx, attr, ptr);
815 ctx->ctx->divmul[0] = dst * src;
816}
817
818static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
819{
820 uint64_t val64;
821 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
822 uint32_t dst, src;
823 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
824 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
825 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
826 src = atom_get_src(ctx, attr, ptr);
827 val64 = (uint64_t)dst * (uint64_t)src;
828 ctx->ctx->divmul[0] = lower_32_bits(val64)((u32)(val64));
829 ctx->ctx->divmul[1] = upper_32_bits(val64)((u32)(((val64) >> 16) >> 16));
830}
831
832static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
833{
834 /* nothing */
835}
836
837static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
838{
839 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
840 uint32_t dst, src, saved;
841 int dptr = *ptr;
842 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
843 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
844 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
845 src = atom_get_src(ctx, attr, ptr);
846 dst |= src;
847 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
848 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
849}
850
851static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
852{
853 uint8_t val = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
854 SDEBUG("POST card output: 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("POST card output: 0x%02X\n", val); } while
(0)
;
855}
856
857static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
858{
859 pr_info("unimplemented!\n")do { } while(0);
860}
861
862static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
863{
864 pr_info("unimplemented!\n")do { } while(0);
865}
866
867static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
868{
869 pr_info("unimplemented!\n")do { } while(0);
870}
871
872static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
873{
874 int idx = U8(*ptr)get_u8(ctx->ctx->bios, (*ptr));
875 (*ptr)++;
876 SDEBUG(" block: %d\n", idx)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" block: %d\n", idx); } while (0)
;
877 if (!idx)
878 ctx->ctx->data_block = 0;
879 else if (idx == 255)
880 ctx->ctx->data_block = ctx->start;
881 else
882 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx)get_u16(ctx->ctx->bios, (ctx->ctx->data_table + 4
+ 2 * idx))
;
883 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" base: 0x%04X\n", ctx->ctx->data_block
); } while (0)
;
884}
885
886static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
887{
888 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
889 SDEBUG(" fb_base: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" fb_base: "); } while (0)
;
890 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
891}
892
893static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
894{
895 int port;
896 switch (arg) {
897 case ATOM_PORT_ATI0:
898 port = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
899 if (port < ATOM_IO_NAMES_CNT5)
900 SDEBUG(" port: %d (%s)\n", port, atom_io_names[port])do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" port: %d (%s)\n", port, atom_io_names
[port]); } while (0)
;
901 else
902 SDEBUG(" port: %d\n", port)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" port: %d\n", port); } while (0)
;
903 if (!port)
904 ctx->ctx->io_mode = ATOM_IO_MM0;
905 else
906 ctx->ctx->io_mode = ATOM_IO_IIO0x80 | port;
907 (*ptr) += 2;
908 break;
909 case ATOM_PORT_PCI1:
910 ctx->ctx->io_mode = ATOM_IO_PCI1;
911 (*ptr)++;
912 break;
913 case ATOM_PORT_SYSIO2:
914 ctx->ctx->io_mode = ATOM_IO_SYSIO2;
915 (*ptr)++;
916 break;
917 }
918}
919
920static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
921{
922 ctx->ctx->reg_block = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
923 (*ptr) += 2;
924 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" base: 0x%04X\n", ctx->ctx->reg_block
); } while (0)
;
925}
926
927static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
928{
929 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++)), shift;
930 uint32_t saved, dst;
931 int dptr = *ptr;
932 attr &= 0x38;
933 attr |= atom_def_dst[attr >> 3] << 6;
934 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
935 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
936 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE04, ptr);
937 SDEBUG(" shift: %d\n", shift)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" shift: %d\n", shift); } while (0)
;
938 dst <<= shift;
939 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
940 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
941}
942
943static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
944{
945 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++)), shift;
946 uint32_t saved, dst;
947 int dptr = *ptr;
948 attr &= 0x38;
949 attr |= atom_def_dst[attr >> 3] << 6;
950 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
951 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
952 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE04, ptr);
953 SDEBUG(" shift: %d\n", shift)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" shift: %d\n", shift); } while (0)
;
954 dst >>= shift;
955 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
956 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
957}
958
959static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
960{
961 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++)), shift;
962 uint32_t saved, dst;
963 int dptr = *ptr;
964 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
965 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
966 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
967 /* op needs to full dst value */
968 dst = saved;
969 shift = atom_get_src(ctx, attr, ptr);
970 SDEBUG(" shift: %d\n", shift)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" shift: %d\n", shift); } while (0)
;
971 dst <<= shift;
972 dst &= atom_arg_mask[dst_align];
973 dst >>= atom_arg_shift[dst_align];
974 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
975 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
976}
977
978static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
979{
980 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++)), shift;
981 uint32_t saved, dst;
982 int dptr = *ptr;
983 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
984 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
985 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
986 /* op needs to full dst value */
987 dst = saved;
988 shift = atom_get_src(ctx, attr, ptr);
989 SDEBUG(" shift: %d\n", shift)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" shift: %d\n", shift); } while (0)
;
990 dst >>= shift;
991 dst &= atom_arg_mask[dst_align];
992 dst >>= atom_arg_shift[dst_align];
993 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
994 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
995}
996
997static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
998{
999 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
1000 uint32_t dst, src, saved;
1001 int dptr = *ptr;
1002 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
1003 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1004 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
1005 src = atom_get_src(ctx, attr, ptr);
1006 dst -= src;
1007 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
1008 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1009}
1010
1011static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
1012{
1013 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
1014 uint32_t src, val, target;
1015 SDEBUG(" switch: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" switch: "); } while (0)
;
1016 src = atom_get_src(ctx, attr, ptr);
1017 while (U16(*ptr)get_u16(ctx->ctx->bios, (*ptr)) != ATOM_CASE_END0x5A5A)
1018 if (U8(*ptr)get_u8(ctx->ctx->bios, (*ptr)) == ATOM_CASE_MAGIC0x63) {
1019 (*ptr)++;
1020 SDEBUG(" case: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" case: "); } while (0)
;
1021 val =
1022 atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM5,
1023 ptr);
1024 target = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
1025 if (val == src) {
1026 SDEBUG(" target: %04X\n", target)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" target: %04X\n", target); } while (
0)
;
1027 *ptr = ctx->start + target;
1028 return;
1029 }
1030 (*ptr) += 2;
1031 } else {
1032 pr_info("Bad case\n")do { } while(0);
1033 return;
1034 }
1035 (*ptr) += 2;
1036}
1037
1038static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1039{
1040 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
1041 uint32_t dst, src;
1042 SDEBUG(" src1: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src1: "); } while (0)
;
1043 dst = atom_get_dst(ctx, arg, attr, ptr, NULL((void *)0), 1);
1044 SDEBUG(" src2: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src2: "); } while (0)
;
1045 src = atom_get_src(ctx, attr, ptr);
1046 ctx->ctx->cs_equal = ((dst & src) == 0);
1047 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" result: %s\n", ctx->ctx->cs_equal
? "EQ" : "NE"); } while (0)
;
1048}
1049
1050static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1051{
1052 uint8_t attr = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
1053 uint32_t dst, src, saved;
1054 int dptr = *ptr;
1055 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
1056 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1057 SDEBUG(" src: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" src: "); } while (0)
;
1058 src = atom_get_src(ctx, attr, ptr);
1059 dst ^= src;
1060 SDEBUG(" dst: ")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(" dst: "); } while (0)
;
1061 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1062}
1063
1064static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1065{
1066 uint8_t val = U8((*ptr)++)get_u8(ctx->ctx->bios, ((*ptr)++));
1067 SDEBUG("DEBUG output: 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("DEBUG output: 0x%02X\n", val); } while
(0)
;
1068}
1069
1070static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
1071{
1072 uint16_t val = U16(*ptr)get_u16(ctx->ctx->bios, (*ptr));
1073 (*ptr) += val + 2;
1074 SDEBUG("PROCESSDS output: 0x%02X\n", val)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("PROCESSDS output: 0x%02X\n", val); } while
(0)
;
1075}
1076
1077static struct {
1078 void (*func) (atom_exec_context *, int *, int);
1079 int arg;
1080} opcode_table[ATOM_OP_CNT127] = {
1081 {
1082 NULL((void *)0), 0}, {
1083 atom_op_move, ATOM_ARG_REG0}, {
1084 atom_op_move, ATOM_ARG_PS1}, {
1085 atom_op_move, ATOM_ARG_WS2}, {
1086 atom_op_move, ATOM_ARG_FB3}, {
1087 atom_op_move, ATOM_ARG_PLL6}, {
1088 atom_op_move, ATOM_ARG_MC7}, {
1089 atom_op_and, ATOM_ARG_REG0}, {
1090 atom_op_and, ATOM_ARG_PS1}, {
1091 atom_op_and, ATOM_ARG_WS2}, {
1092 atom_op_and, ATOM_ARG_FB3}, {
1093 atom_op_and, ATOM_ARG_PLL6}, {
1094 atom_op_and, ATOM_ARG_MC7}, {
1095 atom_op_or, ATOM_ARG_REG0}, {
1096 atom_op_or, ATOM_ARG_PS1}, {
1097 atom_op_or, ATOM_ARG_WS2}, {
1098 atom_op_or, ATOM_ARG_FB3}, {
1099 atom_op_or, ATOM_ARG_PLL6}, {
1100 atom_op_or, ATOM_ARG_MC7}, {
1101 atom_op_shift_left, ATOM_ARG_REG0}, {
1102 atom_op_shift_left, ATOM_ARG_PS1}, {
1103 atom_op_shift_left, ATOM_ARG_WS2}, {
1104 atom_op_shift_left, ATOM_ARG_FB3}, {
1105 atom_op_shift_left, ATOM_ARG_PLL6}, {
1106 atom_op_shift_left, ATOM_ARG_MC7}, {
1107 atom_op_shift_right, ATOM_ARG_REG0}, {
1108 atom_op_shift_right, ATOM_ARG_PS1}, {
1109 atom_op_shift_right, ATOM_ARG_WS2}, {
1110 atom_op_shift_right, ATOM_ARG_FB3}, {
1111 atom_op_shift_right, ATOM_ARG_PLL6}, {
1112 atom_op_shift_right, ATOM_ARG_MC7}, {
1113 atom_op_mul, ATOM_ARG_REG0}, {
1114 atom_op_mul, ATOM_ARG_PS1}, {
1115 atom_op_mul, ATOM_ARG_WS2}, {
1116 atom_op_mul, ATOM_ARG_FB3}, {
1117 atom_op_mul, ATOM_ARG_PLL6}, {
1118 atom_op_mul, ATOM_ARG_MC7}, {
1119 atom_op_div, ATOM_ARG_REG0}, {
1120 atom_op_div, ATOM_ARG_PS1}, {
1121 atom_op_div, ATOM_ARG_WS2}, {
1122 atom_op_div, ATOM_ARG_FB3}, {
1123 atom_op_div, ATOM_ARG_PLL6}, {
1124 atom_op_div, ATOM_ARG_MC7}, {
1125 atom_op_add, ATOM_ARG_REG0}, {
1126 atom_op_add, ATOM_ARG_PS1}, {
1127 atom_op_add, ATOM_ARG_WS2}, {
1128 atom_op_add, ATOM_ARG_FB3}, {
1129 atom_op_add, ATOM_ARG_PLL6}, {
1130 atom_op_add, ATOM_ARG_MC7}, {
1131 atom_op_sub, ATOM_ARG_REG0}, {
1132 atom_op_sub, ATOM_ARG_PS1}, {
1133 atom_op_sub, ATOM_ARG_WS2}, {
1134 atom_op_sub, ATOM_ARG_FB3}, {
1135 atom_op_sub, ATOM_ARG_PLL6}, {
1136 atom_op_sub, ATOM_ARG_MC7}, {
1137 atom_op_setport, ATOM_PORT_ATI0}, {
1138 atom_op_setport, ATOM_PORT_PCI1}, {
1139 atom_op_setport, ATOM_PORT_SYSIO2}, {
1140 atom_op_setregblock, 0}, {
1141 atom_op_setfbbase, 0}, {
1142 atom_op_compare, ATOM_ARG_REG0}, {
1143 atom_op_compare, ATOM_ARG_PS1}, {
1144 atom_op_compare, ATOM_ARG_WS2}, {
1145 atom_op_compare, ATOM_ARG_FB3}, {
1146 atom_op_compare, ATOM_ARG_PLL6}, {
1147 atom_op_compare, ATOM_ARG_MC7}, {
1148 atom_op_switch, 0}, {
1149 atom_op_jump, ATOM_COND_ALWAYS2}, {
1150 atom_op_jump, ATOM_COND_EQUAL5}, {
1151 atom_op_jump, ATOM_COND_BELOW3}, {
1152 atom_op_jump, ATOM_COND_ABOVE0}, {
1153 atom_op_jump, ATOM_COND_BELOWOREQUAL4}, {
1154 atom_op_jump, ATOM_COND_ABOVEOREQUAL1}, {
1155 atom_op_jump, ATOM_COND_NOTEQUAL6}, {
1156 atom_op_test, ATOM_ARG_REG0}, {
1157 atom_op_test, ATOM_ARG_PS1}, {
1158 atom_op_test, ATOM_ARG_WS2}, {
1159 atom_op_test, ATOM_ARG_FB3}, {
1160 atom_op_test, ATOM_ARG_PLL6}, {
1161 atom_op_test, ATOM_ARG_MC7}, {
1162 atom_op_delay, ATOM_UNIT_MILLISEC1}, {
1163 atom_op_delay, ATOM_UNIT_MICROSEC0}, {
1164 atom_op_calltable, 0}, {
1165 atom_op_repeat, 0}, {
1166 atom_op_clear, ATOM_ARG_REG0}, {
1167 atom_op_clear, ATOM_ARG_PS1}, {
1168 atom_op_clear, ATOM_ARG_WS2}, {
1169 atom_op_clear, ATOM_ARG_FB3}, {
1170 atom_op_clear, ATOM_ARG_PLL6}, {
1171 atom_op_clear, ATOM_ARG_MC7}, {
1172 atom_op_nop, 0}, {
1173 atom_op_eot, 0}, {
1174 atom_op_mask, ATOM_ARG_REG0}, {
1175 atom_op_mask, ATOM_ARG_PS1}, {
1176 atom_op_mask, ATOM_ARG_WS2}, {
1177 atom_op_mask, ATOM_ARG_FB3}, {
1178 atom_op_mask, ATOM_ARG_PLL6}, {
1179 atom_op_mask, ATOM_ARG_MC7}, {
1180 atom_op_postcard, 0}, {
1181 atom_op_beep, 0}, {
1182 atom_op_savereg, 0}, {
1183 atom_op_restorereg, 0}, {
1184 atom_op_setdatablock, 0}, {
1185 atom_op_xor, ATOM_ARG_REG0}, {
1186 atom_op_xor, ATOM_ARG_PS1}, {
1187 atom_op_xor, ATOM_ARG_WS2}, {
1188 atom_op_xor, ATOM_ARG_FB3}, {
1189 atom_op_xor, ATOM_ARG_PLL6}, {
1190 atom_op_xor, ATOM_ARG_MC7}, {
1191 atom_op_shl, ATOM_ARG_REG0}, {
1192 atom_op_shl, ATOM_ARG_PS1}, {
1193 atom_op_shl, ATOM_ARG_WS2}, {
1194 atom_op_shl, ATOM_ARG_FB3}, {
1195 atom_op_shl, ATOM_ARG_PLL6}, {
1196 atom_op_shl, ATOM_ARG_MC7}, {
1197 atom_op_shr, ATOM_ARG_REG0}, {
1198 atom_op_shr, ATOM_ARG_PS1}, {
1199 atom_op_shr, ATOM_ARG_WS2}, {
1200 atom_op_shr, ATOM_ARG_FB3}, {
1201 atom_op_shr, ATOM_ARG_PLL6}, {
1202 atom_op_shr, ATOM_ARG_MC7}, {
1203 atom_op_debug, 0}, {
1204 atom_op_processds, 0}, {
1205 atom_op_mul32, ATOM_ARG_PS1}, {
1206 atom_op_mul32, ATOM_ARG_WS2}, {
1207 atom_op_div32, ATOM_ARG_PS1}, {
1208 atom_op_div32, ATOM_ARG_WS2},
1209};
1210
1211static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
1212{
1213 int base = CU16(ctx->cmd_table + 4 + 2 * index)get_u16(ctx->bios, (ctx->cmd_table + 4 + 2 * index));
1214 int len, ws, ps, ptr;
1215 unsigned char op;
1216 atom_exec_context ectx;
1217 int ret = 0;
1218
1219 if (!base)
1220 return -EINVAL22;
1221
1222 len = CU16(base + ATOM_CT_SIZE_PTR)get_u16(ctx->bios, (base + 0));
1223 ws = CU8(base + ATOM_CT_WS_PTR)get_u8(ctx->bios, (base + 4));
1224 ps = CU8(base + ATOM_CT_PS_PTR)get_u8(ctx->bios, (base + 5)) & ATOM_CT_PS_MASK0x7F;
1225 ptr = base + ATOM_CT_CODE_PTR6;
1226
1227 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk(">> execute %04X (len %d, WS %d, PS %d)\n"
, base, len, ws, ps); } while (0)
;
1228
1229 ectx.ctx = ctx;
1230 ectx.ps_shift = ps / 4;
1231 ectx.start = base;
1232 ectx.ps = params;
1233 ectx.abort = false0;
1234 ectx.last_jump = 0;
1235 if (ws)
1236 ectx.ws = kcalloc(4, ws, GFP_KERNEL(0x0001 | 0x0004));
1237 else
1238 ectx.ws = NULL((void *)0);
1239
1240 debug_depth++;
1241 while (1) {
1242 op = CU8(ptr++)get_u8(ctx->bios, (ptr++));
1243 if (op < ATOM_OP_NAMES_CNT123)
1244 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("%s @ 0x%04X\n", atom_op_names[op], ptr
- 1); } while (0)
;
1245 else
1246 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1)do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("[%d] @ 0x%04X\n", op, ptr - 1); } while
(0)
;
1247 if (ectx.abort) {
1248 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",__drm_err("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n"
, base, len, ws, ps, ptr - 1)
1249 base, len, ws, ps, ptr - 1)__drm_err("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n"
, base, len, ws, ps, ptr - 1)
;
1250 ret = -EINVAL22;
1251 goto free;
1252 }
1253
1254 if (op < ATOM_OP_CNT127 && op > 0)
1255 opcode_table[op].func(&ectx, &ptr,
1256 opcode_table[op].arg);
1257 else
1258 break;
1259
1260 if (op == ATOM_OP_EOT91)
1261 break;
1262 }
1263 debug_depth--;
1264 SDEBUG("<<\n")do if (amdgpu_atom_debug) { printk("\0017"); debug_print_spaces
(debug_depth); printk("<<\n"); } while (0)
;
1265
1266free:
1267 if (ws)
1268 kfree(ectx.ws);
1269 return ret;
1270}
1271
1272int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
1273{
1274 int r;
1275
1276 mutex_lock(&ctx->mutex)rw_enter_write(&ctx->mutex);
1277 /* reset data block */
1278 ctx->data_block = 0;
1279 /* reset reg block */
1280 ctx->reg_block = 0;
1281 /* reset fb window */
1282 ctx->fb_base = 0;
1283 /* reset io mode */
1284 ctx->io_mode = ATOM_IO_MM0;
1285 /* reset divmul */
1286 ctx->divmul[0] = 0;
1287 ctx->divmul[1] = 0;
1288 r = amdgpu_atom_execute_table_locked(ctx, index, params);
1289 mutex_unlock(&ctx->mutex)rw_exit_write(&ctx->mutex);
1290 return r;
1291}
1292
1293static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1294
1295static void atom_index_iio(struct atom_context *ctx, int base)
1296{
1297 ctx->iio = kzalloc(2 * 256, GFP_KERNEL(0x0001 | 0x0004));
1298 if (!ctx->iio)
1299 return;
1300 while (CU8(base)get_u8(ctx->bios, (base)) == ATOM_IIO_START1) {
1301 ctx->iio[CU8(base + 1)get_u8(ctx->bios, (base + 1))] = base + 2;
1302 base += 2;
1303 while (CU8(base)get_u8(ctx->bios, (base)) != ATOM_IIO_END9)
1304 base += atom_iio_len[CU8(base)get_u8(ctx->bios, (base))];
1305 base += 3;
1306 }
1307}
1308
1309static void atom_get_vbios_name(struct atom_context *ctx)
1310{
1311 unsigned char *p_rom;
1312 unsigned char str_num;
1313 unsigned short off_to_vbios_str;
1314 unsigned char *c_ptr;
1315 int name_size;
1316 int i;
1317
1318 const char *na = "--N/A--";
1319 char *back;
1320
1321 p_rom = ctx->bios;
1322
1323 str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS);
1324 if (str_num != 0) {
1325 off_to_vbios_str =
1326 *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
1327
1328 c_ptr = (unsigned char *)(p_rom + off_to_vbios_str);
1329 } else {
1330 /* do not know where to find name */
1331 memcpy(ctx->name, na, 7)__builtin_memcpy((ctx->name), (na), (7));
1332 ctx->name[7] = 0;
1333 return;
1334 }
1335
1336 /*
1337 * skip the atombios strings, usually 4
1338 * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type
1339 */
1340 for (i = 0; i < str_num; i++) {
1341 while (*c_ptr != 0)
1342 c_ptr++;
1343 c_ptr++;
1344 }
1345
1346 /* skip the following 2 chars: 0x0D 0x0A */
1347 c_ptr += 2;
1348
1349 name_size = strnlen(c_ptr, STRLEN_LONG - 1);
1350 memcpy(ctx->name, c_ptr, name_size);
1351 back = ctx->name + name_size;
1352 while ((*--back) == ' ')
1353 ;
1354 *(back + 1) = '\0';
1355}
1356
1357static void atom_get_vbios_date(struct atom_context *ctx)
1358{
1359 unsigned char *p_rom;
1360 unsigned char *date_in_rom;
1361
1362 p_rom = ctx->bios;
1363
1364 date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE;
1365
1366 ctx->date[0] = '2';
1367 ctx->date[1] = '0';
1368 ctx->date[2] = date_in_rom[6];
1369 ctx->date[3] = date_in_rom[7];
1370 ctx->date[4] = '/';
1371 ctx->date[5] = date_in_rom[0];
1372 ctx->date[6] = date_in_rom[1];
1373 ctx->date[7] = '/';
1374 ctx->date[8] = date_in_rom[3];
1375 ctx->date[9] = date_in_rom[4];
1376 ctx->date[10] = ' ';
1377 ctx->date[11] = date_in_rom[9];
1378 ctx->date[12] = date_in_rom[10];
1379 ctx->date[13] = date_in_rom[11];
1380 ctx->date[14] = date_in_rom[12];
1381 ctx->date[15] = date_in_rom[13];
1382 ctx->date[16] = '\0';
1383}
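
Note on atom_get_vbios_date() above: the raw stamp at OFFSET_TO_VBIOS_DATE appears to be laid out as MM/DD/YY followed by a time field, and the assignments rearrange it into a "20YY/MM/DD hh:mm" string of 17 bytes (including the terminating NUL) in ctx->date. For example, assuming that layout, a raw stamp of "11/05/21 09:30" would produce "2021/11/05 09:30".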
1384
1385static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
1386 int end, int maxlen)
1387{
1388 unsigned long str_off;
1389 unsigned char *p_rom;
1390 unsigned short str_len;
1391
1392 str_off = 0;
Value stored to 'str_off' is never read
1393 str_len = strnlen(str, maxlen);
1394 p_rom = ctx->bios;
1395
1396 for (; start <= end; ++start) {
1397 for (str_off = 0; str_off < str_len; ++str_off) {
1398 if (str[str_off] != *(p_rom + start + str_off))
1399 break;
1400 }
1401
1402 if (str_off == str_len || str[str_off] == 0)
1403 return p_rom + start;
1404 }
1405 return NULL;
1406}
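
This function contains the dead store reported in the bug summary: the assignment str_off = 0; at line 1392 is never read, because str_off is next written by the initializer of the inner for loop at line 1397 before any read. The store is harmless but redundant. A minimal fix, sketched below from the code above (not necessarily how it has been or will be patched upstream), is simply to drop the line-1392 initialization:

static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
					   int end, int maxlen)
{
	unsigned long str_off;
	unsigned char *p_rom;
	unsigned short str_len;

	/* no separate "str_off = 0;" here; the inner for loop initializes it */
	str_len = strnlen(str, maxlen);
	p_rom = ctx->bios;

	for (; start <= end; ++start) {
		for (str_off = 0; str_off < str_len; ++str_off) {
			if (str[str_off] != *(p_rom + start + str_off))
				break;
		}

		if (str_off == str_len || str[str_off] == 0)
			return p_rom + start;
	}
	return NULL;
}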
1407
1408static void atom_get_vbios_pn(struct atom_context *ctx)
1409{
1410 unsigned char *p_rom;
1411 unsigned short off_to_vbios_str;
1412 unsigned char *vbios_str;
1413 int count;
1414
1415 off_to_vbios_str = 0;
1416 p_rom = ctx->bios;
1417
1418 if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) {
1419 off_to_vbios_str =
1420 *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
1421
1422 vbios_str = (unsigned char *)(p_rom + off_to_vbios_str);
1423 } else {
1424 vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER;
1425 }
1426
1427 if (*vbios_str == 0) {
1428 vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);
1429 if (vbios_str == NULL)
1430 vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
1431 }
1432 if (vbios_str != NULL && *vbios_str == 0)
1433 vbios_str++;
1434
1435 if (vbios_str != NULL) {
1436 count = 0;
1437 while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' &&
1438 vbios_str[count] <= 'z') {
1439 ctx->vbios_pn[count] = vbios_str[count];
1440 count++;
1441 }
1442
1443 ctx->vbios_pn[count] = 0;
1444 }
1445}
1446
1447static void atom_get_vbios_version(struct atom_context *ctx)
1448{
1449 unsigned char *vbios_ver;
1450
1451 /* find anchor ATOMBIOSBK-AMD */
1452 vbios_ver = atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, 3, 1024, 64);
1453 if (vbios_ver != NULL) {
1454 /* skip ATOMBIOSBK-AMD VER */
1455 vbios_ver += 18;
1456 memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL);
1457 } else {
1458 ctx->vbios_ver_str[0] = '\0';
1459 }
1460}
1461
1462struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
1463{
1464 int base;
1465 struct atom_context *ctx =
1466 kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1467 char *str;
1468 struct _ATOM_ROM_HEADER *atom_rom_header;
1469 struct _ATOM_MASTER_DATA_TABLE *master_table;
1470 struct _ATOM_FIRMWARE_INFO *atom_fw_info;
1471 u16 idx;
1472
1473 if (!ctx)
1474 return NULL;
1475
1476 ctx->card = card;
1477 ctx->bios = bios;
1478
1479 if (CU16(0) != ATOM_BIOS_MAGIC) {
1480 pr_info("Invalid BIOS magic\n");
1481 kfree(ctx);
1482 return NULL;
1483 }
1484 if (strncmp
1485 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1486 strlen(ATOM_ATI_MAGIC))) {
1487 pr_info("Invalid ATI magic\n");
1488 kfree(ctx);
1489 return NULL;
1490 }
1491
1492 base = CU16(ATOM_ROM_TABLE_PTR);
1493 if (strncmp
1494 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1495 strlen(ATOM_ROM_MAGIC))) {
1496 pr_info("Invalid ATOM magic\n");
1497 kfree(ctx);
1498 return NULL;
1499 }
1500
1501 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1502 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1503 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1504 if (!ctx->iio) {
1505 amdgpu_atom_destroy(ctx);
1506 return NULL;
1507 }
1508
1509 idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
1510 if (idx == 0)
1511 idx = 0x80;
1512
1513 str = CSTR(idx);
1514 if (*str != '\0') {
1515 pr_info("ATOM BIOS: %s\n", str);
1516 strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
1517 }
1518
1519 atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
1520 if (atom_rom_header->usMasterDataTableOffset != 0) {
1521 master_table = (struct _ATOM_MASTER_DATA_TABLE *)
1522 CSTR(atom_rom_header->usMasterDataTableOffset);
1523 if (master_table->ListOfDataTables.FirmwareInfo != 0) {
1524 atom_fw_info = (struct _ATOM_FIRMWARE_INFO *)
1525 CSTR(master_table->ListOfDataTables.FirmwareInfo);
1526 ctx->version = atom_fw_info->ulFirmwareRevision;
1527 }
1528 }
1529
1530 atom_get_vbios_name(ctx);
1531 atom_get_vbios_pn(ctx);
1532 atom_get_vbios_date(ctx);
1533 atom_get_vbios_version(ctx);
1534
1535 return ctx;
1536}
1537
1538int amdgpu_atom_asic_init(struct atom_context *ctx)
1539{
1540 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1541 uint32_t ps[16];
1542 int ret;
1543
1544 memset(ps, 0, 64);
1545
1546 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1547 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1548 if (!ps[0] || !ps[1])
1549 return 1;
1550
1551 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1552 return 1;
1553 ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1554 if (ret)
1555 return ret;
1556
1557 memset(ps, 0, 64);
1558
1559 return ret;
1560}
1561
1562void amdgpu_atom_destroy(struct atom_context *ctx)
1563{
1564 kfree(ctx->iio);
1565 kfree(ctx);
1566}
1567
1568bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
1569 uint16_t *size, uint8_t *frev, uint8_t *crev,
1570 uint16_t *data_start)
1571{
1572 int offset = index * 2 + 4;
1573 int idx = CU16(ctx->data_table + offset);
1574 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1575
1576 if (!mdt[index])
1577 return false;
1578
1579 if (size)
1580 *size = CU16(idx);
1581 if (frev)
1582 *frev = CU8(idx + 2);
1583 if (crev)
1584 *crev = CU8(idx + 3);
1585 *data_start = idx;
1586 return true;
1587}
1588
1589bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
1590 uint8_t *crev)
1591{
1592 int offset = index * 2 + 4;
1593 int idx = CU16(ctx->cmd_table + offset);
1594 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1595
1596 if (!mct[index])
1597 return false;
1598
1599 if (frev)
1600 *frev = CU8(idx + 2);
1601 if (crev)
1602 *crev = CU8(idx + 3);
1603 return true;
1604}
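
For context, a hypothetical caller of the header parsers above (a sketch only; GetIndexIntoMasterTable() is assumed from the driver's atombios headers and is not part of this file). Note that data_start must point at valid storage, since line 1585 writes through it unconditionally, while size, frev and crev are optional:

	uint16_t size, data_offset;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);	/* assumed helper macro */

	if (amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
		/* data_offset is an offset into ctx->bios; size/frev/crev describe the table */
		struct _ATOM_FIRMWARE_INFO *fw_info =
			(struct _ATOM_FIRMWARE_INFO *)((char *)ctx->bios + data_offset);
		/* ... interpret fw_info according to frev/crev ... */
	}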
1605