Bug Summary

File: dev/acpi/acpicpu.c
Warning: line 1200, column 2
Access to field 'state' results in a dereference of a null pointer (loaded from variable 'best')

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name acpicpu.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/acpi/acpicpu.c
1/* $OpenBSD: acpicpu.c,v 1.91 2022/01/09 05:42:37 jsg Exp $ */
2/*
3 * Copyright (c) 2005 Marco Peereboom <marco@openbsd.org>
4 * Copyright (c) 2015 Philip Guenther <guenther@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <sys/param.h>
20#include <sys/kernel.h> /* for tick */
21#include <sys/signalvar.h>
22#include <sys/sysctl.h>
23#include <sys/systm.h>
24#include <sys/device.h>
25#include <sys/malloc.h>
26#include <sys/queue.h>
27#include <sys/atomic.h>
28
29#include <machine/bus.h>
30#include <machine/cpu.h>
31#include <machine/cpufunc.h>
32#include <machine/specialreg.h>
33
34#include <dev/acpi/acpireg.h>
35#include <dev/acpi/acpivar.h>
36#include <dev/acpi/acpidev.h>
37#include <dev/acpi/amltypes.h>
38#include <dev/acpi/dsdt.h>
39
40#include <sys/sensors.h>
41
42int acpicpu_match(struct device *, void *, void *);
43void acpicpu_attach(struct device *, struct device *, void *);
44int acpicpu_notify(struct aml_node *, int, void *);
45void acpicpu_setperf(int);
46void acpicpu_setperf_ppc_change(struct acpicpu_pss *, int);
47
48#define ACPI_STATE_C00x00 0x00
49#define ACPI_STATE_C10x01 0x01
50#define ACPI_STATE_C20x02 0x02
51#define ACPI_STATE_C30x03 0x03
52
53#define ACPI_PDC_REVID0x1 0x1
54#define ACPI_PDC_SMP0xa 0xa
55#define ACPI_PDC_MSR0x1 0x1
56
57/* _PDC/_OSC Intel capabilities flags */
58#define ACPI_PDC_P_FFH0x0001 0x0001
59#define ACPI_PDC_C_C1_HALT0x0002 0x0002
60#define ACPI_PDC_T_FFH0x0004 0x0004
61#define ACPI_PDC_SMP_C1PT0x0008 0x0008
62#define ACPI_PDC_SMP_C2C30x0010 0x0010
63#define ACPI_PDC_SMP_P_SWCOORD0x0020 0x0020
64#define ACPI_PDC_SMP_C_SWCOORD0x0040 0x0040
65#define ACPI_PDC_SMP_T_SWCOORD0x0080 0x0080
66#define ACPI_PDC_C_C1_FFH0x0100 0x0100
67#define ACPI_PDC_C_C2C3_FFH0x0200 0x0200
68/* reserved 0x0400 */
69#define ACPI_PDC_P_HWCOORD0x0800 0x0800
70#define ACPI_PDC_PPC_NOTIFY0x1000 0x1000
71
72#define CST_METH_HALT0 0
73#define CST_METH_IO_HALT1 1
74#define CST_METH_MWAIT2 2
75#define CST_METH_GAS_IO3 3
76
77/* flags on Intel's FFH mwait method */
78#define CST_FLAG_MWAIT_HW_COORD0x1 0x1
79#define CST_FLAG_MWAIT_BM_AVOIDANCE0x2 0x2
80#define CST_FLAG_FALLBACK0x4000 0x4000 /* fallback for broken _CST */
81#define CST_FLAG_SKIP0x8000 0x8000 /* state is worse choice */
82
83#define FLAGS_MWAIT_ONLY0x02 0x02
84#define FLAGS_BMCHECK0x04 0x04
85#define FLAGS_NOTHROTTLE0x08 0x08
86#define FLAGS_NOPSS0x10 0x10
87#define FLAGS_NOPCT0x20 0x20
88
89#define CPU_THT_EN(1L << 4) (1L << 4)
90#define CPU_MAXSTATE(sc)(1L << (sc)->sc_duty_wid) (1L << (sc)->sc_duty_wid)
91#define CPU_STATE(sc,pct)((pct * (1L << (sc)->sc_duty_wid) / 100) << (sc
)->sc_duty_off)
((pct * CPU_MAXSTATE(sc)(1L << (sc)->sc_duty_wid) / 100) << (sc)->sc_duty_off)
92#define CPU_STATEMASK(sc)(((1L << (sc)->sc_duty_wid) - 1) << (sc)->sc_duty_off
)
((CPU_MAXSTATE(sc)(1L << (sc)->sc_duty_wid) - 1) << (sc)->sc_duty_off)
93
94#define ACPI_MAX_C2_LATENCY100 100
95#define ACPI_MAX_C3_LATENCY1000 1000
96
97#define CSD_COORD_SW_ALL0xFC 0xFC
98#define CSD_COORD_SW_ANY0xFD 0xFD
99#define CSD_COORD_HW_ALL0xFE 0xFE
100
101/* Make sure throttling bits are valid,a=addr,o=offset,w=width */
102#define valid_throttle(o,w,a)(a && w && (o+w)<=31 && (o>4 ||
(o+w)<=4))
(a && w && (o+w)<=31 && (o>4 || (o+w)<=4))
103
104struct acpi_cstate
105{
106 SLIST_ENTRY(acpi_cstate)struct { struct acpi_cstate *sle_next; } link;
107
108 u_short state;
109 short method; /* CST_METH_* */
110 u_short flags; /* CST_FLAG_* */
111 u_short latency;
112 int power;
113 uint64_t address; /* or mwait hint */
114};
115
/* Tally of entries into each C-state, indexed by state number. */
unsigned long cst_stats[4] = { 0 };
117
118struct acpicpu_softc {
119 struct device sc_dev;
120 int sc_cpu;
121
122 int sc_duty_wid;
123 int sc_duty_off;
124 uint32_t sc_pblk_addr;
125 int sc_pblk_len;
126 int sc_flags;
127 unsigned long sc_prev_sleep;
128 unsigned long sc_last_itime;
129
130 struct cpu_info *sc_ci;
131 SLIST_HEAD(,acpi_cstate)struct { struct acpi_cstate *slh_first; } sc_cstates;
132
133 bus_space_tag_t sc_iot;
134 bus_space_handle_t sc_ioh;
135
136 struct acpi_softc *sc_acpi;
137 struct aml_node *sc_devnode;
138
139 int sc_pss_len; /* XXX */
140 int sc_ppc;
141 int sc_level;
142 struct acpicpu_pss *sc_pss;
143 size_t sc_pssfulllen;
144
145 struct acpicpu_pct sc_pct;
146 /* save compensation for pct access for lying bios' */
147 uint32_t sc_pct_stat_as;
148 uint32_t sc_pct_ctrl_as;
149 uint32_t sc_pct_stat_len;
150 uint32_t sc_pct_ctrl_len;
151 /*
152 * XXX: _PPC Change listener
153 * PPC changes can occur when for example a machine is disconnected
154 * from AC power and can no longer support the highest frequency or
155 * voltage when driven from the battery.
156 * Should probably be reimplemented as a list for now we assume only
157 * one listener
158 */
159 void (*sc_notify)(struct acpicpu_pss *, int);
160};
161
162void acpicpu_add_cstatepkg(struct aml_value *, void *);
163void acpicpu_add_cdeppkg(struct aml_value *, void *);
164int acpicpu_getppc(struct acpicpu_softc *);
165int acpicpu_getpct(struct acpicpu_softc *);
166int acpicpu_getpss(struct acpicpu_softc *);
167int acpicpu_getcst(struct acpicpu_softc *);
168void acpicpu_getcst_from_fadt(struct acpicpu_softc *);
169void acpicpu_print_one_cst(struct acpi_cstate *_cx);
170void acpicpu_print_cst(struct acpicpu_softc *_sc);
171void acpicpu_add_cstate(struct acpicpu_softc *_sc, int _state, int _method,
172 int _flags, int _latency, int _power, uint64_t _address);
173void acpicpu_set_pdc(struct acpicpu_softc *);
174void acpicpu_idle(void);
175
176#if 0
177void acpicpu_set_throttle(struct acpicpu_softc *, int);
178struct acpi_cstate *acpicpu_find_cstate(struct acpicpu_softc *, int);
179#endif
180
181struct cfattach acpicpu_ca = {
182 sizeof(struct acpicpu_softc), acpicpu_match, acpicpu_attach
183};
184
185struct cfdriver acpicpu_cd = {
186 NULL((void *)0), "acpicpu", DV_DULL
187};
188
189const char *acpicpu_hids[] = {
190 "ACPI0007",
191 NULL((void *)0)
192};
193
194extern int setperf_prio;
195
#if 0
/*
 * Program FADT duty-cycle throttling via the P_BLK I/O registers:
 * disable throttling, then (for < 100%) set the new duty value and
 * re-enable.  Currently compiled out.
 */
void
acpicpu_set_throttle(struct acpicpu_softc *sc, int level)
{
	uint32_t pbval;

	if (sc->sc_flags & FLAGS_NOTHROTTLE)
		return;

	/* Disable throttling control */
	pbval = inl(sc->sc_pblk_addr);
	outl(sc->sc_pblk_addr, pbval & ~CPU_THT_EN);
	if (level < 100) {
		pbval &= ~CPU_STATEMASK(sc);
		pbval |= CPU_STATE(sc, level);
		outl(sc->sc_pblk_addr, pbval & ~CPU_THT_EN);
		outl(sc->sc_pblk_addr, pbval | CPU_THT_EN);
	}
}

/* Linear search of sc_cstates for a given state number; NULL if absent. */
struct acpi_cstate *
acpicpu_find_cstate(struct acpicpu_softc *sc, int state)
{
	struct acpi_cstate *cx;

	SLIST_FOREACH(cx, &sc->sc_cstates, link)
		if (cx->state == state)
			return cx;
	return (NULL);
}
#endif
227
228
229void
230acpicpu_set_pdc(struct acpicpu_softc *sc)
231{
232 struct aml_value cmd, osc_cmd[4];
233 struct aml_value res;
234 uint32_t cap;
235 uint32_t buf[3];
236
237 /* 4077A616-290C-47BE-9EBD-D87058713953 */
238 static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29,
239 0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70,
240 0x58, 0x71, 0x39, 0x53 };
241 cap = ACPI_PDC_C_C1_HALT0x0002 | ACPI_PDC_P_FFH0x0001 | ACPI_PDC_C_C1_FFH0x0100
242 | ACPI_PDC_C_C2C3_FFH0x0200 | ACPI_PDC_SMP_P_SWCOORD0x0020 | ACPI_PDC_SMP_C2C30x0010
243 | ACPI_PDC_SMP_C1PT0x0008;
244
245 if (aml_searchname(sc->sc_devnode, "_OSC")) {
246 /* Query _OSC */
247 memset(&osc_cmd, 0, sizeof(osc_cmd))__builtin_memset((&osc_cmd), (0), (sizeof(osc_cmd)));
248 osc_cmd[0].type = AML_OBJTYPE_BUFFER;
249 osc_cmd[0].v_buffer_.vbuffer = (uint8_t *)&cpu_oscuuid;
250 osc_cmd[0].length = sizeof(cpu_oscuuid);
251
252 osc_cmd[1].type = AML_OBJTYPE_INTEGER;
253 osc_cmd[1].v_integer_.vinteger = 1;
254 osc_cmd[1].length = 1;
255
256 osc_cmd[2].type = AML_OBJTYPE_INTEGER;
257 osc_cmd[2].v_integer_.vinteger = 2;
258 osc_cmd[2].length = 1;
259
260 buf[0] = 1;
261 buf[1] = cap;
262 osc_cmd[3].type = AML_OBJTYPE_BUFFER;
263 osc_cmd[3].v_buffer_.vbuffer = (int8_t *)&buf;
264 osc_cmd[3].length = sizeof(buf);
265
266 aml_evalname(sc->sc_acpi, sc->sc_devnode, "_OSC",
267 4, osc_cmd, &res);
268
269 if (res.type != AML_OBJTYPE_BUFFER || res.length < 8) {
270 printf(": unable to query capabilities\n");
271 aml_freevalue(&res);
272 return;
273 }
274
275 /* Evaluate _OSC */
276 memset(&osc_cmd, 0, sizeof(osc_cmd))__builtin_memset((&osc_cmd), (0), (sizeof(osc_cmd)));
277 osc_cmd[0].type = AML_OBJTYPE_BUFFER;
278 osc_cmd[0].v_buffer_.vbuffer = (uint8_t *)&cpu_oscuuid;
279 osc_cmd[0].length = sizeof(cpu_oscuuid);
280
281 osc_cmd[1].type = AML_OBJTYPE_INTEGER;
282 osc_cmd[1].v_integer_.vinteger = 1;
283 osc_cmd[1].length = 1;
284
285 osc_cmd[2].type = AML_OBJTYPE_INTEGER;
286 osc_cmd[2].v_integer_.vinteger = 2;
287 osc_cmd[2].length = 1;
288
289 buf[0] = 0;
290 buf[1] = (*(uint32_t *)&res.v_buffer_.vbuffer[4]) & cap;
291 osc_cmd[3].type = AML_OBJTYPE_BUFFER;
292 osc_cmd[3].v_buffer_.vbuffer = (int8_t *)&buf;
293 osc_cmd[3].length = sizeof(buf);
294
295 aml_freevalue(&res);
296
297 aml_evalname(sc->sc_acpi, sc->sc_devnode, "_OSC",
298 4, osc_cmd, NULL((void *)0));
299 } else {
300 /* Evaluate _PDC */
301 memset(&cmd, 0, sizeof(cmd))__builtin_memset((&cmd), (0), (sizeof(cmd)));
302 cmd.type = AML_OBJTYPE_BUFFER;
303 cmd.v_buffer_.vbuffer = (uint8_t *)&buf;
304 cmd.length = sizeof(buf);
305
306 buf[0] = ACPI_PDC_REVID0x1;
307 buf[1] = 1;
308 buf[2] = cap;
309
310 aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PDC",
311 1, &cmd, NULL((void *)0));
312 }
313}
314
315/*
316 * sanity check mwait hints against what cpuid told us
317 * ...but because intel screwed up, just check whether cpuid says
318 * the given state has _any_ substates.
319 */
320static int
321check_mwait_hints(int state, int hints)
322{
323 int cstate;
324 int num_substates;
325
326 if (cpu_mwait_size == 0)
327 return (0);
328 cstate = ((hints >> 4) & 0xf) + 1;
329 if (cstate == 16)
330 cstate = 0;
331 else if (cstate > 7) {
332 /* out of range of test against CPUID; just trust'em */
333 return (1);
334 }
335 num_substates = (cpu_mwait_states >> (4 * cstate)) & 0xf;
336 if (num_substates == 0) {
337 printf(": C%d bad (state %d has no substates)", state, cstate);
338 return (0);
339 }
340 return (1);
341}
342
343void
344acpicpu_add_cstate(struct acpicpu_softc *sc, int state, int method,
345 int flags, int latency, int power, uint64_t address)
346{
347 struct acpi_cstate *cx;
348
349 dnprintf(10," C%d: latency:.%4x power:%.4x addr:%.16llx\n",
350 state, latency, power, address);
351
352 /* add a new state, or overwrite the fallback C1 state? */
353 if (state != ACPI_STATE_C10x01 ||
354 (cx = SLIST_FIRST(&sc->sc_cstates)((&sc->sc_cstates)->slh_first)) == NULL((void *)0) ||
355 (cx->flags & CST_FLAG_FALLBACK0x4000) == 0) {
356 cx = malloc(sizeof(*cx), M_DEVBUF2, M_WAITOK0x0001);
357 SLIST_INSERT_HEAD(&sc->sc_cstates, cx, link)do { (cx)->link.sle_next = (&sc->sc_cstates)->slh_first
; (&sc->sc_cstates)->slh_first = (cx); } while (0)
;
358 }
359
360 cx->state = state;
361 cx->method = method;
362 cx->flags = flags;
363 cx->latency = latency;
364 cx->power = power;
365 cx->address = address;
366}
367
368/* Found a _CST object, add new cstate for each entry */
369void
370acpicpu_add_cstatepkg(struct aml_value *val, void *arg)
371{
372 struct acpicpu_softc *sc = arg;
373 uint64_t addr;
374 struct acpi_grd *grd;
375 int state, method, flags;
376
377#if defined(ACPI_DEBUG) && !defined(SMALL_KERNEL)
378 aml_showvalue(val);
379#endif
380 if (val->type != AML_OBJTYPE_PACKAGE || val->length != 4)
381 return;
382
383 /* range and sanity checks */
384 state = val->v_package_.vpackage[1]->v_integer_.vinteger;
385 if (state < 0 || state > 4)
386 return;
387 if (val->v_package_.vpackage[0]->type != AML_OBJTYPE_BUFFER) {
388 printf(": C%d (unexpected ACPI object type %d)",
389 state, val->v_package_.vpackage[0]->type);
390 return;
391 }
392 grd = (struct acpi_grd *)val->v_package_.vpackage[0]->v_buffer_.vbuffer;
393 if (val->v_package_.vpackage[0]->length != sizeof(*grd) + 2 ||
394 grd->grd_descriptor != LR_GENREGISTER0x82 ||
395 grd->grd_length != sizeof(grd->grd_gas) ||
396 val->v_package_.vpackage[0]->v_buffer_.vbuffer[sizeof(*grd)] != SRT_ENDTAG0x79) {
397 printf(": C%d (bogo buffer)", state);
398 return;
399 }
400
401 flags = 0;
402 switch (grd->grd_gas.address_space_id) {
403 case GAS_FUNCTIONAL_FIXED127:
404 if (grd->grd_gas.register_bit_width == 0) {
405 method = CST_METH_HALT0;
406 addr = 0;
407 } else {
408 /*
409 * In theory we should only do this for
410 * vendor 1 == Intel but other values crop up,
411 * presumably due to the normal ACPI spec confusion.
412 */
413 switch (grd->grd_gas.register_bit_offset) {
414 case 0x1:
415 method = CST_METH_IO_HALT1;
416 addr = grd->grd_gas.address;
417
418 /* i386 and amd64 I/O space is 16bits */
419 if (addr > 0xffff) {
420 printf(": C%d (bogo I/O addr %llx)",
421 state, addr);
422 return;
423 }
424 break;
425 case 0x2:
426 addr = grd->grd_gas.address;
427 if (!check_mwait_hints(state, addr))
428 return;
429 method = CST_METH_MWAIT2;
430 flags = grd->grd_gas.access_size;
431 break;
432 default:
433 printf(": C%d (unknown FFH class %d)",
434 state, grd->grd_gas.register_bit_offset);
435 return;
436 }
437 }
438 break;
439
440 case GAS_SYSTEM_IOSPACE1:
441 addr = grd->grd_gas.address;
442 if (grd->grd_gas.register_bit_width != 8 ||
443 grd->grd_gas.register_bit_offset != 0) {
444 printf(": C%d (unhandled %s spec: %d/%d)", state,
445 "I/O", grd->grd_gas.register_bit_width,
446 grd->grd_gas.register_bit_offset);
447 return;
448 }
449 method = CST_METH_GAS_IO3;
450 break;
451
452 default:
453 /* dump the GAS for analysis */
454 {
455 int i;
456 printf(": C%d (unhandled GAS:", state);
457 for (i = 0; i < sizeof(grd->grd_gas); i++)
458 printf(" %#x", ((u_char *)&grd->grd_gas)[i]);
459 printf(")");
460
461 }
462 return;
463 }
464
465 acpicpu_add_cstate(sc, state, method, flags,
466 val->v_package_.vpackage[2]->v_integer_.vinteger, val->v_package_.vpackage[3]->v_integer_.vinteger, addr);
467}
468
469
470/* Found a _CSD object, print the dependency */
471void
472acpicpu_add_cdeppkg(struct aml_value *val, void *arg)
473{
474 int64_t num_proc, coord_type, domain, cindex;
475
476 /*
477 * errors: unexpected object type, bad length, mismatched length,
478 * and bad CSD revision
479 */
480 if (val->type != AML_OBJTYPE_PACKAGE || val->length < 6 ||
481 val->length != val->v_package_.vpackage[0]->v_integer_.vinteger ||
482 val->v_package_.vpackage[1]->v_integer_.vinteger != 0) {
483#if 1 || defined(ACPI_DEBUG) && !defined(SMALL_KERNEL)
484 aml_showvalue(val);
485#endif
486 printf("bogus CSD\n");
487 return;
488 }
489
490 /* coordinating 'among' one CPU is trivial, ignore */
491 num_proc = val->v_package_.vpackage[4]->v_integer_.vinteger;
492 if (num_proc == 1)
493 return;
494
495 /* we practically assume the hardware will coordinate, so ignore */
496 coord_type = val->v_package_.vpackage[3]->v_integer_.vinteger;
497 if (coord_type == CSD_COORD_HW_ALL0xFE)
498 return;
499
500 domain = val->v_package_.vpackage[2]->v_integer_.vinteger;
501 cindex = val->v_package_.vpackage[5]->v_integer_.vinteger;
502 printf(": CSD (c=%#llx d=%lld n=%lld i=%lli)",
503 coord_type, domain, num_proc, cindex);
504}
505
506int
507acpicpu_getcst(struct acpicpu_softc *sc)
508{
509 struct aml_value res;
510 struct acpi_cstate *cx, *next_cx;
511 int use_nonmwait;
512
513 /* delete the existing list */
514 while ((cx = SLIST_FIRST(&sc->sc_cstates)((&sc->sc_cstates)->slh_first)) != NULL((void *)0)) {
515 SLIST_REMOVE_HEAD(&sc->sc_cstates, link)do { (&sc->sc_cstates)->slh_first = (&sc->sc_cstates
)->slh_first->link.sle_next; } while (0)
;
516 free(cx, M_DEVBUF2, sizeof(*cx));
517 }
518
519 /* provide a fallback C1-via-halt in case _CST's C1 is bogus */
520 acpicpu_add_cstate(sc, ACPI_STATE_C10x01, CST_METH_HALT0,
521 CST_FLAG_FALLBACK0x4000, 1, -1, 0);
522
523 if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_CST", 0, NULL((void *)0), &res))
524 return (1);
525
526 aml_foreachpkg(&res, 1, acpicpu_add_cstatepkg, sc);
527 aml_freevalue(&res);
528
529 /* only have fallback state? then no _CST objects were understood */
530 cx = SLIST_FIRST(&sc->sc_cstates)((&sc->sc_cstates)->slh_first);
531 if (cx->flags & CST_FLAG_FALLBACK0x4000)
532 return (1);
533
534 /*
535 * Skip states >= C2 if the CPU's LAPIC timer stops in deep
536 * states (i.e., it doesn't have the 'ARAT' bit set).
537 * Also keep track if all the states we'll use use mwait.
538 */
539 use_nonmwait = 0;
540 while ((next_cx = SLIST_NEXT(cx, link)((cx)->link.sle_next)) != NULL((void *)0)) {
541 if (cx->state > 1 &&
542 (sc->sc_ci->ci_feature_tpmflags & TPM_ARAT0x00000004) == 0)
543 cx->flags |= CST_FLAG_SKIP0x8000;
544 else if (cx->method != CST_METH_MWAIT2)
545 use_nonmwait = 1;
546 cx = next_cx;
547 }
548 if (use_nonmwait)
549 sc->sc_flags &= ~FLAGS_MWAIT_ONLY0x02;
550 else
551 sc->sc_flags |= FLAGS_MWAIT_ONLY0x02;
552
553 if (!aml_evalname(sc->sc_acpi, sc->sc_devnode, "_CSD", 0, NULL((void *)0), &res)) {
554 aml_foreachpkg(&res, 1, acpicpu_add_cdeppkg, sc);
555 aml_freevalue(&res);
556 }
557
558 return (0);
559}
560
561/*
562 * old-style fixed C-state info in the FADT.
563 * Note that this has extra restrictions on values and flags.
564 */
565void
566acpicpu_getcst_from_fadt(struct acpicpu_softc *sc)
567{
568 struct acpi_fadt *fadt = sc->sc_acpi->sc_fadt;
569 int flags;
570
571 /* FADT has to set flag to do C2 and higher on MP */
572 if ((fadt->flags & FADT_P_LVL2_UP0x00000008) == 0 && ncpus > 1)
573 return;
574
575 /* skip these C2 and C3 states if the CPU doesn't have ARAT */
576 flags = (sc->sc_ci->ci_feature_tpmflags & TPM_ARAT0x00000004)
577 ? 0 : CST_FLAG_SKIP0x8000;
578
579 /* Some systems don't export a full PBLK; reduce functionality */
580 if (sc->sc_pblk_len >= 5 && fadt->p_lvl2_lat <= ACPI_MAX_C2_LATENCY100) {
581 acpicpu_add_cstate(sc, ACPI_STATE_C20x02, CST_METH_GAS_IO3, flags,
582 fadt->p_lvl2_lat, -1, sc->sc_pblk_addr + 4);
583 }
584 if (sc->sc_pblk_len >= 6 && fadt->p_lvl3_lat <= ACPI_MAX_C3_LATENCY1000)
585 acpicpu_add_cstate(sc, ACPI_STATE_C30x03, CST_METH_GAS_IO3, flags,
586 fadt->p_lvl3_lat, -1, sc->sc_pblk_addr + 5);
587}
588
589
590void
591acpicpu_print_one_cst(struct acpi_cstate *cx)
592{
593 const char *meth = "";
594 int show_addr = 0;
595
596 switch (cx->method) {
597 case CST_METH_IO_HALT1:
598 show_addr = 1;
599 /* fallthrough */
600 case CST_METH_HALT0:
601 meth = " halt";
602 break;
603
604 case CST_METH_MWAIT2:
605 meth = " mwait";
606 show_addr = cx->address != 0;
607 break;
608
609 case CST_METH_GAS_IO3:
610 meth = " io";
611 show_addr = 1;
612 break;
613
614 }
615
616 printf(" %sC%d(", (cx->flags & CST_FLAG_SKIP0x8000 ? "!" : ""), cx->state);
617 if (cx->power != -1)
618 printf("%d", cx->power);
619 printf("@%d%s", cx->latency, meth);
620 if (cx->flags & ~CST_FLAG_SKIP0x8000) {
621 if (cx->flags & CST_FLAG_FALLBACK0x4000)
622 printf("!");
623 else
624 printf(".%x", (cx->flags & ~CST_FLAG_SKIP0x8000));
625 }
626 if (show_addr)
627 printf("@0x%llx", cx->address);
628 printf(")");
629}
630
631void
632acpicpu_print_cst(struct acpicpu_softc *sc)
633{
634 struct acpi_cstate *cx;
635 int i;
636
637 if (!SLIST_EMPTY(&sc->sc_cstates)(((&sc->sc_cstates)->slh_first) == ((void *)0))) {
638 printf(":");
639
640 i = 0;
641 SLIST_FOREACH(cx, &sc->sc_cstates, link)for((cx) = ((&sc->sc_cstates)->slh_first); (cx) != (
(void *)0); (cx) = ((cx)->link.sle_next))
{
642 if (i++)
643 printf(",");
644 acpicpu_print_one_cst(cx);
645 }
646 }
647}
648
649
650int
651acpicpu_match(struct device *parent, void *match, void *aux)
652{
653 struct acpi_attach_args *aa = aux;
654 struct cfdata *cf = match;
655 struct acpi_softc *acpi = (struct acpi_softc *)parent;
656
657 if (acpi_matchhids(aa, acpicpu_hids, cf->cf_driver->cd_name) &&
658 aa->aaa_node && aa->aaa_node->value &&
659 aa->aaa_node->value->type == AML_OBJTYPE_DEVICE) {
660 /*
661 * Record that we've seen a Device() CPU object,
662 * so we won't attach any Processor() nodes.
663 */
664 acpi->sc_skip_processor = 1;
665 return (1);
666 }
667
668 /* sanity */
669 if (aa->aaa_name == NULL((void *)0) ||
670 strcmp(aa->aaa_name, cf->cf_driver->cd_name) != 0 ||
671 aa->aaa_table != NULL((void *)0))
672 return (0);
673
674 return (1);
675}
676
677void
678acpicpu_attach(struct device *parent, struct device *self, void *aux)
679{
680 struct acpicpu_softc *sc = (struct acpicpu_softc *)self;
681 struct acpi_attach_args *aa = aux;
682 struct aml_value res;
683 int64_t uid;
684 int i;
685 uint32_t status = 0;
686 CPU_INFO_ITERATORint cii;
687 struct cpu_info *ci;
688
689 sc->sc_acpi = (struct acpi_softc *)parent;
690 sc->sc_devnode = aa->aaa_node;
691
692 SLIST_INIT(&sc->sc_cstates){ ((&sc->sc_cstates)->slh_first) = ((void *)0); };
693
694 if (aml_evalinteger(sc->sc_acpi, sc->sc_devnode,
695 "_UID", 0, NULL((void *)0), &uid) == 0)
696 sc->sc_cpu = uid;
697
698 if (aml_evalnode(sc->sc_acpi, sc->sc_devnode, 0, NULL((void *)0), &res) == 0) {
699 if (res.type == AML_OBJTYPE_PROCESSOR) {
700 sc->sc_cpu = res.v_processor_.vprocessor.proc_id;
701 sc->sc_pblk_addr = res.v_processor_.vprocessor.proc_addr;
702 sc->sc_pblk_len = res.v_processor_.vprocessor.proc_len;
703 }
704 aml_freevalue(&res);
705 }
706 sc->sc_duty_off = sc->sc_acpi->sc_fadt->duty_offset;
707 sc->sc_duty_wid = sc->sc_acpi->sc_fadt->duty_width;
708
709 /* link in the matching cpu_info */
710 CPU_INFO_FOREACH(cii, ci)for (cii = 0, ci = cpu_info_list; ci != ((void *)0); ci = ci->
ci_next)
711 if (ci->ci_acpi_proc_id == sc->sc_cpu) {
712 ci->ci_acpicpudev = self;
713 sc->sc_ci = ci;
714 break;
715 }
716 if (ci == NULL((void *)0)) {
717 printf(": no cpu matching ACPI ID %d\n", sc->sc_cpu);
718 return;
719 }
720
721 sc->sc_prev_sleep = 1000000;
722
723 acpicpu_set_pdc(sc);
724
725 if (!valid_throttle(sc->sc_duty_off, sc->sc_duty_wid, sc->sc_pblk_addr)(sc->sc_pblk_addr && sc->sc_duty_wid &&
(sc->sc_duty_off+sc->sc_duty_wid)<=31 && (sc
->sc_duty_off>4 || (sc->sc_duty_off+sc->sc_duty_wid
)<=4))
)
726 sc->sc_flags |= FLAGS_NOTHROTTLE0x08;
727#ifdef ACPI_DEBUG
728 printf(": %s: ", sc->sc_devnode->name);
729 printf("\n: hdr:%x pblk:%x,%x duty:%x,%x pstate:%x "
730 "(%ld throttling states)\n", sc->sc_acpi->sc_fadt->hdr_revisionhdr.revision,
731 sc->sc_pblk_addr, sc->sc_pblk_len, sc->sc_duty_off,
732 sc->sc_duty_wid, sc->sc_acpi->sc_fadt->pstate_cnt,
733 CPU_MAXSTATE(sc)(1L << (sc)->sc_duty_wid));
734#endif
735
736 /* Get C-States from _CST or FADT */
737 if (acpicpu_getcst(sc) || SLIST_EMPTY(&sc->sc_cstates)(((&sc->sc_cstates)->slh_first) == ((void *)0)))
738 acpicpu_getcst_from_fadt(sc);
739 else {
740 /* Notify BIOS we use _CST objects */
741 if (sc->sc_acpi->sc_fadt->cst_cnt) {
742 acpi_write_pmreg(sc->sc_acpi, ACPIREG_SMICMD0x0C, 0,
743 sc->sc_acpi->sc_fadt->cst_cnt);
744 }
745 }
746 if (!SLIST_EMPTY(&sc->sc_cstates)(((&sc->sc_cstates)->slh_first) == ((void *)0))) {
747 extern uint32_t acpi_force_bm;
748
749 cpu_idle_cycle_fcn = &acpicpu_idle;
750
751 /*
752 * C3 (and maybe C2?) needs BM_RLD to be set to
753 * wake the system
754 */
755 if (SLIST_FIRST(&sc->sc_cstates)((&sc->sc_cstates)->slh_first)->state > 1 && acpi_force_bm == 0) {
756 uint16_t en = acpi_read_pmreg(sc->sc_acpi,
757 ACPIREG_PM1_CNT0x10, 0);
758 if ((en & ACPI_PM1_BM_RLD0x0002) == 0) {
759 acpi_write_pmreg(sc->sc_acpi, ACPIREG_PM1_CNT0x10,
760 0, en | ACPI_PM1_BM_RLD0x0002);
761 acpi_force_bm = ACPI_PM1_BM_RLD0x0002;
762 }
763 }
764 }
765
766 if (acpicpu_getpss(sc)) {
767 sc->sc_flags |= FLAGS_NOPSS0x10;
768 } else {
769#ifdef ACPI_DEBUG
770 for (i = 0; i < sc->sc_pss_len; i++) {
771 dnprintf(20, "%d %d %d %d %d %d\n",
772 sc->sc_pss[i].pss_core_freq,
773 sc->sc_pss[i].pss_power,
774 sc->sc_pss[i].pss_trans_latency,
775 sc->sc_pss[i].pss_bus_latency,
776 sc->sc_pss[i].pss_ctrl,
777 sc->sc_pss[i].pss_status);
778 }
779 dnprintf(20, "\n");
780#endif
781 if (sc->sc_pss_len == 0) {
782 /* this should never happen */
783 printf("%s: invalid _PSS length\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
784 sc->sc_flags |= FLAGS_NOPSS0x10;
785 }
786
787 acpicpu_getppc(sc);
788 if (acpicpu_getpct(sc))
789 sc->sc_flags |= FLAGS_NOPCT0x20;
790 else if (sc->sc_pss_len > 0) {
791 /* Notify BIOS we are handling p-states */
792 if (sc->sc_acpi->sc_fadt->pstate_cnt) {
793 acpi_write_pmreg(sc->sc_acpi, ACPIREG_SMICMD0x0C,
794 0, sc->sc_acpi->sc_fadt->pstate_cnt);
795 }
796
797 aml_register_notify(sc->sc_devnode, NULL((void *)0),
798 acpicpu_notify, sc, ACPIDEV_NOPOLL0);
799
800 acpi_gasio(sc->sc_acpi, ACPI_IOREAD0,
801 sc->sc_pct.pct_status.grd_gas.address_space_id,
802 sc->sc_pct.pct_status.grd_gas.address,
803 sc->sc_pct_stat_as, sc->sc_pct_stat_as, &status);
804 sc->sc_level = (100 / sc->sc_pss_len) *
805 (sc->sc_pss_len - status);
806 dnprintf(20, "%s: cpu index %d, percentage %d\n",
807 DEVNAME(sc), status, sc->sc_level);
808 if (setperf_prio < 30) {
809 cpu_setperf = acpicpu_setperf;
810 acpicpu_set_notify(acpicpu_setperf_ppc_change);
811 setperf_prio = 30;
812 acpi_hasprocfvs = 1;
813 }
814 }
815 }
816
817 /*
818 * Nicely enumerate what power management capabilities
819 * ACPI CPU provides.
820 */
821 acpicpu_print_cst(sc);
822 if (!(sc->sc_flags & (FLAGS_NOPSS0x10 | FLAGS_NOPCT0x20)) ||
823 !(sc->sc_flags & FLAGS_NOPSS0x10)) {
824 printf("%c ", SLIST_EMPTY(&sc->sc_cstates)(((&sc->sc_cstates)->slh_first) == ((void *)0)) ? ':' : ',');
825
826 /*
827 * If acpicpu is itself providing the capability to transition
828 * states, enumerate them in the fashion that est and powernow
829 * would.
830 */
831 if (!(sc->sc_flags & (FLAGS_NOPSS0x10 | FLAGS_NOPCT0x20))) {
832 printf("FVS, ");
833 for (i = 0; i < sc->sc_pss_len - 1; i++)
834 printf("%d, ", sc->sc_pss[i].pss_core_freq);
835 printf("%d MHz", sc->sc_pss[i].pss_core_freq);
836 } else
837 printf("PSS");
838 }
839
840 printf("\n");
841}
842
843int
844acpicpu_getppc(struct acpicpu_softc *sc)
845{
846 struct aml_value res;
847
848 sc->sc_ppc = 0;
849
850 if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PPC", 0, NULL((void *)0), &res)) {
851 dnprintf(10, "%s: no _PPC\n", DEVNAME(sc));
852 return (1);
853 }
854
855 sc->sc_ppc = aml_val2int(&res);
856 dnprintf(10, "%s: _PPC: %d\n", DEVNAME(sc), sc->sc_ppc);
857 aml_freevalue(&res);
858
859 return (0);
860}
861
862int
863acpicpu_getpct(struct acpicpu_softc *sc)
864{
865 struct aml_value res;
866 int rv = 1;
867
868 if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PCT", 0, NULL((void *)0), &res)) {
869 dnprintf(20, "%s: no _PCT\n", DEVNAME(sc));
870 return (1);
871 }
872
873 if (res.length != 2) {
874 dnprintf(20, "%s: %s: invalid _PCT length\n", DEVNAME(sc),
875 sc->sc_devnode->name);
876 return (1);
877 }
878
879 memcpy(&sc->sc_pct.pct_ctrl, res.v_package[0]->v_buffer,__builtin_memcpy((&sc->sc_pct.pct_ctrl), (res._.vpackage
[0]->_.vbuffer), (sizeof sc->sc_pct.pct_ctrl))
880 sizeof sc->sc_pct.pct_ctrl)__builtin_memcpy((&sc->sc_pct.pct_ctrl), (res._.vpackage
[0]->_.vbuffer), (sizeof sc->sc_pct.pct_ctrl))
;
881 if (sc->sc_pct.pct_ctrl.grd_gas.address_space_id ==
882 GAS_FUNCTIONAL_FIXED127) {
883 dnprintf(20, "CTRL GASIO is functional fixed hardware.\n");
884 goto ffh;
885 }
886
887 memcpy(&sc->sc_pct.pct_status, res.v_package[1]->v_buffer,__builtin_memcpy((&sc->sc_pct.pct_status), (res._.vpackage
[1]->_.vbuffer), (sizeof sc->sc_pct.pct_status))
888 sizeof sc->sc_pct.pct_status)__builtin_memcpy((&sc->sc_pct.pct_status), (res._.vpackage
[1]->_.vbuffer), (sizeof sc->sc_pct.pct_status))
;
889 if (sc->sc_pct.pct_status.grd_gas.address_space_id ==
890 GAS_FUNCTIONAL_FIXED127) {
891 dnprintf(20, "CTRL GASIO is functional fixed hardware.\n");
892 goto ffh;
893 }
894
895 dnprintf(10, "_PCT(ctrl) : %02x %04x %02x %02x %02x %02x %016llx\n",
896 sc->sc_pct.pct_ctrl.grd_descriptor,
897 sc->sc_pct.pct_ctrl.grd_length,
898 sc->sc_pct.pct_ctrl.grd_gas.address_space_id,
899 sc->sc_pct.pct_ctrl.grd_gas.register_bit_width,
900 sc->sc_pct.pct_ctrl.grd_gas.register_bit_offset,
901 sc->sc_pct.pct_ctrl.grd_gas.access_size,
902 sc->sc_pct.pct_ctrl.grd_gas.address);
903
904 dnprintf(10, "_PCT(status): %02x %04x %02x %02x %02x %02x %016llx\n",
905 sc->sc_pct.pct_status.grd_descriptor,
906 sc->sc_pct.pct_status.grd_length,
907 sc->sc_pct.pct_status.grd_gas.address_space_id,
908 sc->sc_pct.pct_status.grd_gas.register_bit_width,
909 sc->sc_pct.pct_status.grd_gas.register_bit_offset,
910 sc->sc_pct.pct_status.grd_gas.access_size,
911 sc->sc_pct.pct_status.grd_gas.address);
912
913 /* if not set assume single 32 bit access */
914 sc->sc_pct_stat_as = sc->sc_pct.pct_status.grd_gas.register_bit_width
915 / 8;
916 if (sc->sc_pct_stat_as == 0)
917 sc->sc_pct_stat_as = 4;
918 sc->sc_pct_ctrl_as = sc->sc_pct.pct_ctrl.grd_gas.register_bit_width / 8;
919 if (sc->sc_pct_ctrl_as == 0)
920 sc->sc_pct_ctrl_as = 4;
921 sc->sc_pct_stat_len = sc->sc_pct.pct_status.grd_gas.access_size;
922 if (sc->sc_pct_stat_len == 0)
923 sc->sc_pct_stat_len = sc->sc_pct_stat_as;
924 sc->sc_pct_ctrl_len = sc->sc_pct.pct_ctrl.grd_gas.access_size;
925 if (sc->sc_pct_ctrl_len == 0)
926 sc->sc_pct_ctrl_len = sc->sc_pct_ctrl_as;
927
928 rv = 0;
929ffh:
930 aml_freevalue(&res);
931 return (rv);
932}
933
934int
935acpicpu_getpss(struct acpicpu_softc *sc)
936{
937 struct aml_value res;
938 int i, c, cf;
939
940 if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PSS", 0, NULL((void *)0), &res)) {
941 dprintf("%s: no _PSS\n", DEVNAME(sc));
942 return (1);
943 }
944
945 free(sc->sc_pss, M_DEVBUF2, sc->sc_pssfulllen);
946
947 sc->sc_pss = mallocarray(res.length, sizeof(*sc->sc_pss), M_DEVBUF2,
948 M_WAITOK0x0001 | M_ZERO0x0008);
949 sc->sc_pssfulllen = res.length * sizeof(*sc->sc_pss);
950
951 c = 0;
952 for (i = 0; i < res.length; i++) {
953 cf = aml_val2int(res.v_package_.vpackage[i]->v_package_.vpackage[0]);
954
955 /* This heuristic comes from FreeBSDs
956 * dev/acpica/acpi_perf.c to weed out invalid PSS entries.
957 */
958 if (cf == sc->sc_pss[c].pss_core_freq) {
959 printf("%s: struck PSS entry, core frequency equals "
960 " last\n", sc->sc_dev.dv_xname);
961 continue;
962 }
963
964 if (cf == 0xFFFF || cf == 0x9999 || cf == 99999 || cf == 0) {
965 printf("%s: struck PSS entry, inappropriate core "
966 "frequency value\n", sc->sc_dev.dv_xname);
967 continue;
968 }
969
970 sc->sc_pss[c].pss_core_freq = cf;
971 sc->sc_pss[c].pss_power = aml_val2int(
972 res.v_package_.vpackage[i]->v_package_.vpackage[1]);
973 sc->sc_pss[c].pss_trans_latency = aml_val2int(
974 res.v_package_.vpackage[i]->v_package_.vpackage[2]);
975 sc->sc_pss[c].pss_bus_latency = aml_val2int(
976 res.v_package_.vpackage[i]->v_package_.vpackage[3]);
977 sc->sc_pss[c].pss_ctrl = aml_val2int(
978 res.v_package_.vpackage[i]->v_package_.vpackage[4]);
979 sc->sc_pss[c].pss_status = aml_val2int(
980 res.v_package_.vpackage[i]->v_package_.vpackage[5]);
981 c++;
982 }
983 sc->sc_pss_len = c;
984
985 aml_freevalue(&res);
986
987 return (0);
988}
989
990int
991acpicpu_fetch_pss(struct acpicpu_pss **pss)
992{
993 struct acpicpu_softc *sc;
994
995 /*
996 * XXX: According to the ACPI spec in an SMP system all processors
997 * are supposed to support the same states. For now we pray
998 * the bios ensures this...
999 */
1000
1001 sc = (struct acpicpu_softc *)cpu_info_primary(*(struct cpu_info *)((char *)&cpu_info_full_primary + 4096
*2 - __builtin_offsetof(struct cpu_info, ci_dev)))
.ci_acpicpudev;
1002 if (!sc)
1003 return 0;
1004 *pss = sc->sc_pss;
1005
1006 return (sc->sc_pss_len);
1007}
1008
1009int
1010acpicpu_notify(struct aml_node *node, int notify_type, void *arg)
1011{
1012 struct acpicpu_softc *sc = arg;
1013
1014 dnprintf(10, "acpicpu_notify: %.2x %s\n", notify_type,
1015 sc->sc_devnode->name);
1016
1017 switch (notify_type) {
1018 case 0x80: /* _PPC changed, retrieve new values */
1019 acpicpu_getppc(sc);
1020 acpicpu_getpss(sc);
1021 if (sc->sc_notify)
1022 sc->sc_notify(sc->sc_pss, sc->sc_pss_len);
1023 break;
1024
1025 case 0x81: /* _CST changed, retrieve new values */
1026 acpicpu_getcst(sc);
1027 printf("%s: notify", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1028 acpicpu_print_cst(sc);
1029 printf("\n");
1030 break;
1031
1032 default:
1033 printf("%s: unhandled cpu event %x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
1034 notify_type);
1035 break;
1036 }
1037
1038 return (0);
1039}
1040
1041void
1042acpicpu_set_notify(void (*func)(struct acpicpu_pss *, int))
1043{
1044 struct acpicpu_softc *sc;
1045
1046 sc = (struct acpicpu_softc *)cpu_info_primary(*(struct cpu_info *)((char *)&cpu_info_full_primary + 4096
*2 - __builtin_offsetof(struct cpu_info, ci_dev)))
.ci_acpicpudev;
1047 if (sc != NULL((void *)0))
1048 sc->sc_notify = func;
1049}
1050
1051void
1052acpicpu_setperf_ppc_change(struct acpicpu_pss *pss, int npss)
1053{
1054 struct acpicpu_softc *sc;
1055
1056 sc = (struct acpicpu_softc *)cpu_info_primary(*(struct cpu_info *)((char *)&cpu_info_full_primary + 4096
*2 - __builtin_offsetof(struct cpu_info, ci_dev)))
.ci_acpicpudev;
1057
1058 if (sc != NULL((void *)0))
1059 cpu_setperf(sc->sc_level);
1060}
1061
1062void
1063acpicpu_setperf(int level)
1064{
1065 struct acpicpu_softc *sc;
1066 struct acpicpu_pss *pss = NULL((void *)0);
1067 int idx, len;
1068 uint32_t status = 0;
1069
1070 sc = (struct acpicpu_softc *)curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})
->ci_acpicpudev;
1071
1072 dnprintf(10, "%s: acpicpu setperf level %d\n",
1073 sc->sc_devnode->name, level);
1074
1075 if (level < 0 || level > 100) {
1076 dnprintf(10, "%s: acpicpu setperf illegal percentage\n",
1077 sc->sc_devnode->name);
1078 return;
1079 }
1080
1081 /*
1082 * XXX this should be handled more gracefully and it needs to also do
1083 * the duty cycle method instead of pss exclusively
1084 */
1085 if (sc->sc_flags & FLAGS_NOPSS0x10 || sc->sc_flags & FLAGS_NOPCT0x20) {
1086 dnprintf(10, "%s: acpicpu no _PSS or _PCT\n",
1087 sc->sc_devnode->name);
1088 return;
1089 }
1090
1091 if (sc->sc_ppc)
1092 len = sc->sc_ppc;
1093 else
1094 len = sc->sc_pss_len;
1095 idx = (len - 1) - (level / (100 / len));
1096 if (idx < 0)
1097 idx = 0;
1098
1099 if (sc->sc_ppc)
1100 idx += sc->sc_pss_len - sc->sc_ppc;
1101
1102 if (idx > sc->sc_pss_len)
1103 idx = sc->sc_pss_len - 1;
1104
1105 dnprintf(10, "%s: acpicpu setperf index %d pss_len %d ppc %d\n",
1106 sc->sc_devnode->name, idx, sc->sc_pss_len, sc->sc_ppc);
1107
1108 pss = &sc->sc_pss[idx];
1109
1110#ifdef ACPI_DEBUG
1111 /* keep this for now since we will need this for debug in the field */
1112 printf("0 status: %x %llx %u %u ctrl: %x %llx %u %u\n",
1113 sc->sc_pct.pct_status.grd_gas.address_space_id,
1114 sc->sc_pct.pct_status.grd_gas.address,
1115 sc->sc_pct_stat_as, sc->sc_pct_stat_len,
1116 sc->sc_pct.pct_ctrl.grd_gas.address_space_id,
1117 sc->sc_pct.pct_ctrl.grd_gas.address,
1118 sc->sc_pct_ctrl_as, sc->sc_pct_ctrl_len);
1119#endif
1120 acpi_gasio(sc->sc_acpi, ACPI_IOREAD0,
1121 sc->sc_pct.pct_status.grd_gas.address_space_id,
1122 sc->sc_pct.pct_status.grd_gas.address, sc->sc_pct_stat_as,
1123 sc->sc_pct_stat_len, &status);
1124 dnprintf(20, "1 status: %u <- %u\n", status, pss->pss_status);
1125
1126 /* Are we already at the requested frequency? */
1127 if (status == pss->pss_status)
1128 return;
1129
1130 acpi_gasio(sc->sc_acpi, ACPI_IOWRITE1,
1131 sc->sc_pct.pct_ctrl.grd_gas.address_space_id,
1132 sc->sc_pct.pct_ctrl.grd_gas.address, sc->sc_pct_ctrl_as,
1133 sc->sc_pct_ctrl_len, &pss->pss_ctrl);
1134 dnprintf(20, "pss_ctrl: %x\n", pss->pss_ctrl);
1135
1136 acpi_gasio(sc->sc_acpi, ACPI_IOREAD0,
1137 sc->sc_pct.pct_status.grd_gas.address_space_id,
1138 sc->sc_pct.pct_status.grd_gas.address, sc->sc_pct_stat_as,
1139 sc->sc_pct_stat_as, &status);
1140 dnprintf(20, "2 status: %d\n", status);
1141
1142 /* Did the transition succeed? */
1143 if (status == pss->pss_status) {
1144 cpuspeed = pss->pss_core_freq;
1145 sc->sc_level = level;
1146 } else
1147 printf("%s: acpicpu setperf failed to alter frequency\n",
1148 sc->sc_devnode->name);
1149}
1150
1151void
1152acpicpu_idle(void)
1153{
1154 struct cpu_info *ci = curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})
;
1155 struct acpicpu_softc *sc = (struct acpicpu_softc *)ci->ci_acpicpudev;
1156 struct acpi_cstate *best, *cx;
1157 unsigned long itime;
1158
1159 if (sc == NULL((void *)0)) {
1
Assuming 'sc' is not equal to NULL
2
Taking false branch
1160 __asm volatile("sti");
1161 panic("null acpicpu");
1162 }
1163
1164 /* possibly update the MWAIT_ONLY flag in cpu_info */
1165 if (sc->sc_flags & FLAGS_MWAIT_ONLY0x02) {
3
Assuming the condition is false
4
Taking false branch
1166 if ((ci->ci_mwait & MWAIT_ONLY0x4) == 0)
1167 atomic_setbits_intx86_atomic_setbits_u32(&ci->ci_mwait, MWAIT_ONLY0x4);
1168 } else if (ci->ci_mwait & MWAIT_ONLY0x4)
5
Assuming the condition is false
6
Taking false branch
1169 atomic_clearbits_intx86_atomic_clearbits_u32(&ci->ci_mwait, MWAIT_ONLY0x4);
1170
1171 /*
1172 * Find the first state with a latency we'll accept, ignoring
1173 * states marked skippable
1174 */
1175 best = cx = SLIST_FIRST(&sc->sc_cstates)((&sc->sc_cstates)->slh_first);
1176 while ((cx->flags & CST_FLAG_SKIP0x8000) ||
7
Assuming the condition is false
9
Loop condition is false. Execution continues on line 1183
1177 cx->latency * 3 > sc->sc_prev_sleep) {
8
Assuming the condition is false
1178 if ((cx = SLIST_NEXT(cx, link)((cx)->link.sle_next)) == NULL((void *)0))
1179 break;
1180 best = cx;
1181 }
1182
1183 if (best->state >= 3 &&
10
Assuming field 'state' is >= 3
13
Taking true branch
1184 (best->flags & CST_FLAG_MWAIT_BM_AVOIDANCE0x2) &&
11
Assuming the condition is true
1185 acpi_read_pmreg(acpi_softc, ACPIREG_PM1_STS0x0E, 0) & ACPI_PM1_BM_STS0x0010) {
12
Assuming the condition is true
1186 /* clear it and back off */
1187 acpi_write_pmreg(acpi_softc, ACPIREG_PM1_STS0x0E, 0,
1188 ACPI_PM1_BM_STS0x0010);
1189 while ((cx = SLIST_NEXT(cx, link)((cx)->link.sle_next)) != NULL((void *)0)) {
14
Value assigned to 'cx'
15
Assuming pointer value is null
16
Loop condition is false. Execution continues on line 1196
1190 if (cx->flags & CST_FLAG_SKIP0x8000)
1191 continue;
1192 if (cx->state < 3 ||
1193 (cx->flags & CST_FLAG_MWAIT_BM_AVOIDANCE0x2) == 0)
1194 break;
1195 }
1196 best = cx;
17
Null pointer value stored to 'best'
1197 }
1198
1199
1200 atomic_inc_long(&cst_stats[best->state])_atomic_inc_long(&cst_stats[best->state]);
18
Access to field 'state' results in a dereference of a null pointer (loaded from variable 'best')
1201
1202 itime = tick / 2;
1203 switch (best->method) {
1204 default:
1205 case CST_METH_HALT0:
1206 __asm volatile("sti; hlt");
1207 break;
1208
1209 case CST_METH_IO_HALT1:
1210 inb((u_short)best->address)( (__builtin_constant_p(((u_short)best->address)) &&
((u_short)best->address) < 0x100) ? __inbc((u_short)best
->address) : __inb((u_short)best->address))
;
1211 __asm volatile("sti; hlt");
1212 break;
1213
1214 case CST_METH_MWAIT2:
1215 {
1216 struct timeval start, stop;
1217 unsigned int hints;
1218
1219#ifdef __LP64__1
1220 if ((read_rflags() & PSL_I0x00000200) == 0)
1221 panic("idle with interrupts blocked!");
1222#else
1223 if ((read_eflags() & PSL_I0x00000200) == 0)
1224 panic("idle with interrupts blocked!");
1225#endif
1226
1227 /* something already queued? */
1228 if (!cpu_is_idle(ci)((ci)->ci_schedstate.spc_whichqs == 0))
1229 return;
1230
1231 /*
1232 * About to idle; setting the MWAIT_IN_IDLE bit tells
1233 * cpu_unidle() that it can't be a no-op and tells cpu_kick()
1234 * that it doesn't need to use an IPI. We also set the
1235 * MWAIT_KEEP_IDLING bit: those routines clear it to stop
1236 * the mwait. Once they're set, we do a final check of the
1237 * queue, in case another cpu called setrunqueue() and added
1238 * something to the queue and called cpu_unidle() between
1239 * the check in sched_idle() and here.
1240 */
1241 hints = (unsigned)best->address;
1242 microuptime(&start);
1243 atomic_setbits_intx86_atomic_setbits_u32(&ci->ci_mwait, MWAIT_IDLING(0x1 | 0x2));
1244 if (cpu_is_idle(ci)((ci)->ci_schedstate.spc_whichqs == 0)) {
1245 /* intel errata AAI65: cflush before monitor */
1246 if (ci->ci_cflushsz != 0 &&
1247 strcmp(cpu_vendor, "GenuineIntel") == 0) {
1248 membar_sync()do { __asm volatile("mfence" ::: "memory"); } while (0);
1249 clflush((unsigned long)&ci->ci_mwait);
1250 membar_sync()do { __asm volatile("mfence" ::: "memory"); } while (0);
1251 }
1252
1253 monitor(&ci->ci_mwait, 0, 0);
1254 if ((ci->ci_mwait & MWAIT_IDLING(0x1 | 0x2)) == MWAIT_IDLING(0x1 | 0x2))
1255 mwait(0, hints);
1256 }
1257
1258 microuptime(&stop);
1259 timersub(&stop, &start, &stop)do { (&stop)->tv_sec = (&stop)->tv_sec - (&
start)->tv_sec; (&stop)->tv_usec = (&stop)->
tv_usec - (&start)->tv_usec; if ((&stop)->tv_usec
< 0) { (&stop)->tv_sec--; (&stop)->tv_usec +=
1000000; } } while (0)
;
1260 itime = stop.tv_sec * 1000000 + stop.tv_usec;
1261
1262 /* done idling; let cpu_kick() know that an IPI is required */
1263 atomic_clearbits_intx86_atomic_clearbits_u32(&ci->ci_mwait, MWAIT_IDLING(0x1 | 0x2));
1264 break;
1265 }
1266
1267 case CST_METH_GAS_IO3:
1268 inb((u_short)best->address)( (__builtin_constant_p(((u_short)best->address)) &&
((u_short)best->address) < 0x100) ? __inbc((u_short)best
->address) : __inb((u_short)best->address))
;
1269 /* something harmless to give system time to change state */
1270 acpi_read_pmreg(acpi_softc, ACPIREG_PM1_STS0x0E, 0);
1271 break;
1272
1273 }
1274
1275 sc->sc_last_itime = itime;
1276 itime >>= 1;
1277 sc->sc_prev_sleep = (sc->sc_prev_sleep + (sc->sc_prev_sleep >> 1)
1278 + itime) >> 1;
1279}