Bug Summary

File: arch/amd64/amd64/tsc.c
Warning: line 95, column 4
Value stored to 'count' is never read

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name tsc.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/arch/amd64/amd64/tsc.c
1/* $OpenBSD: tsc.c,v 1.24 2021/08/31 15:11:54 kettenis Exp $ */
2/*
3 * Copyright (c) 2008 The NetBSD Foundation, Inc.
4 * Copyright (c) 2016,2017 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2017 Adam Steen <adam@adamsteen.com.au>
6 * Copyright (c) 2017 Mike Belopuhov <mike@openbsd.org>
7 * Copyright (c) 2019 Paul Irofti <paul@irofti.net>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22#include <sys/param.h>
23#include <sys/systm.h>
24#include <sys/timetc.h>
25#include <sys/atomic.h>
26
27#include <machine/cpu.h>
28#include <machine/cpufunc.h>
29
30#define RECALIBRATE_MAX_RETRIES5 5
31#define RECALIBRATE_SMI_THRESHOLD50000 50000
32#define RECALIBRATE_DELAY_THRESHOLD50 50
33
34int tsc_recalibrate;
35
36uint64_t tsc_frequency;
37int tsc_is_invariant;
38
39#define TSC_DRIFT_MAX250 250
40#define TSC_SKEW_MAX100 100
41int64_t tsc_drift_observed;
42
43volatile int64_t tsc_sync_val;
44volatile struct cpu_info *tsc_sync_cpu;
45
46u_int tsc_get_timecount(struct timecounter *tc);
47void tsc_delay(int usecs);
48
49#include "lapic.h"
50#if NLAPIC1 > 0
51extern u_int32_t lapic_per_second;
52#endif
53
54struct timecounter tsc_timecounter = {
55 .tc_get_timecount = tsc_get_timecount,
56 .tc_poll_pps = NULL((void *)0),
57 .tc_counter_mask = ~0u,
58 .tc_frequency = 0,
59 .tc_name = "tsc",
60 .tc_quality = -1000,
61 .tc_priv = NULL((void *)0),
62 .tc_user = TC_TSC1,
63};
64
65uint64_t
66tsc_freq_cpuid(struct cpu_info *ci)
67{
68 uint64_t count;
69 uint32_t eax, ebx, khz, dummy;
70
71 if (!strcmp(cpu_vendor, "GenuineIntel") &&
72 cpuid_level >= 0x15) {
73 eax = ebx = khz = dummy = 0;
74 CPUID(0x15, eax, ebx, khz, dummy)__asm volatile("cpuid" : "=a" (eax), "=b" (ebx), "=c" (khz), "=d"
(dummy) : "a" (0x15))
;
75 khz /= 1000;
76 if (khz == 0) {
77 switch (ci->ci_model) {
78 case 0x4e: /* Skylake mobile */
79 case 0x5e: /* Skylake desktop */
80 case 0x8e: /* Kabylake mobile */
81 case 0x9e: /* Kabylake desktop */
82 case 0xa5: /* CML-H CML-S62 CML-S102 */
83 case 0xa6: /* CML-U62 */
84 khz = 24000; /* 24.0 MHz */
85 break;
86 case 0x5f: /* Atom Denverton */
87 khz = 25000; /* 25.0 MHz */
88 break;
89 case 0x5c: /* Atom Goldmont */
90 khz = 19200; /* 19.2 MHz */
91 break;
92 }
93 }
94 if (ebx == 0 || eax == 0)
95 count = 0;
Value stored to 'count' is never read
96 else if ((count = (uint64_t)khz * (uint64_t)ebx / eax) != 0) {
97#if NLAPIC1 > 0
98 lapic_per_second = khz * 1000;
99#endif
100 return (count * 1000);
101 }
102 }
103
104 return (0);
105}
106
107void
108tsc_identify(struct cpu_info *ci)
109{
110 if (!(ci->ci_flags & CPUF_PRIMARY0x0008) ||
111 !(ci->ci_flags & CPUF_CONST_TSC0x0040) ||
112 !(ci->ci_flags & CPUF_INVAR_TSC0x0100))
113 return;
114
115 tsc_is_invariant = 1;
116
117 tsc_frequency = tsc_freq_cpuid(ci);
118 if (tsc_frequency > 0)
119 delay_func = tsc_delay;
120}
121
122static inline int
123get_tsc_and_timecount(struct timecounter *tc, uint64_t *tsc, uint64_t *count)
124{
125 uint64_t n, tsc1, tsc2;
126 int i;
127
128 for (i = 0; i < RECALIBRATE_MAX_RETRIES5; i++) {
129 tsc1 = rdtsc_lfence();
130 n = (tc->tc_get_timecount(tc) & tc->tc_counter_mask);
131 tsc2 = rdtsc_lfence();
132
133 if ((tsc2 - tsc1) < RECALIBRATE_SMI_THRESHOLD50000) {
134 *count = n;
135 *tsc = tsc2;
136 return (0);
137 }
138 }
139 return (1);
140}
141
/*
 * Convert a TSC delta measured over "usec" microseconds into a
 * frequency in Hz.
 */
static inline uint64_t
calculate_tsc_freq(uint64_t tsc1, uint64_t tsc2, int usec)
{
	uint64_t ticks = tsc2 - tsc1;

	return (ticks * 1000000 / usec);
}
150
151static inline uint64_t
152calculate_tc_delay(struct timecounter *tc, uint64_t count1, uint64_t count2)
153{
154 uint64_t delta;
155
156 if (count2 < count1)
157 count2 += tc->tc_counter_mask;
158
159 delta = (count2 - count1);
160 return (delta * 1000000 / tc->tc_frequency);
161}
162
163uint64_t
164measure_tsc_freq(struct timecounter *tc)
165{
166 uint64_t count1, count2, frequency, min_freq, tsc1, tsc2;
167 u_long s;
168 int delay_usec, i, err1, err2, usec, success = 0;
169
170 /* warmup the timers */
171 for (i = 0; i < 3; i++) {
172 (void)tc->tc_get_timecount(tc);
173 (void)rdtsc();
174 }
175
176 min_freq = ULLONG_MAX0xffffffffffffffffULL;
177
178 delay_usec = 100000;
179 for (i = 0; i < 3; i++) {
180 s = intr_disable();
181
182 err1 = get_tsc_and_timecount(tc, &tsc1, &count1);
183 delay(delay_usec)(*delay_func)(delay_usec);
184 err2 = get_tsc_and_timecount(tc, &tsc2, &count2);
185
186 intr_restore(s);
187
188 if (err1 || err2)
189 continue;
190
191 usec = calculate_tc_delay(tc, count1, count2);
192
193 if ((usec < (delay_usec - RECALIBRATE_DELAY_THRESHOLD50)) ||
194 (usec > (delay_usec + RECALIBRATE_DELAY_THRESHOLD50)))
195 continue;
196
197 frequency = calculate_tsc_freq(tsc1, tsc2, usec);
198
199 min_freq = MIN(min_freq, frequency)(((min_freq)<(frequency))?(min_freq):(frequency));
200 success++;
201 }
202
203 return (success > 1 ? min_freq : 0);
204}
205
206void
207calibrate_tsc_freq(void)
208{
209 struct timecounter *reference = tsc_timecounter.tc_priv;
210 uint64_t freq;
211
212 if (!reference || !tsc_recalibrate)
213 return;
214
215 if ((freq = measure_tsc_freq(reference)) == 0)
216 return;
217 tsc_frequency = freq;
218 tsc_timecounter.tc_frequency = freq;
219 if (tsc_is_invariant)
220 tsc_timecounter.tc_quality = 2000;
221}
222
223void
224cpu_recalibrate_tsc(struct timecounter *tc)
225{
226 struct timecounter *reference = tsc_timecounter.tc_priv;
227
228 /* Prevent recalibration with a worse timecounter source */
229 if (reference && reference->tc_quality > tc->tc_quality)
230 return;
231
232 tsc_timecounter.tc_priv = tc;
233 calibrate_tsc_freq();
234}
235
236u_int
237tsc_get_timecount(struct timecounter *tc)
238{
239 return rdtsc_lfence() + curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r"
(__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self)));
__ci;})
->ci_tsc_skew;
240}
241
242void
243tsc_timecounter_init(struct cpu_info *ci, uint64_t cpufreq)
244{
245#ifdef TSC_DEBUG
246 printf("%s: TSC skew=%lld observed drift=%lld\n", ci->ci_dev->dv_xname,
247 (long long)ci->ci_tsc_skew, (long long)tsc_drift_observed);
248#endif
249 if (ci->ci_tsc_skew < -TSC_SKEW_MAX100 || ci->ci_tsc_skew > TSC_SKEW_MAX100) {
250 printf("%s: disabling user TSC (skew=%lld)\n",
251 ci->ci_dev->dv_xname, (long long)ci->ci_tsc_skew);
252 tsc_timecounter.tc_user = 0;
253 }
254
255 if (!(ci->ci_flags & CPUF_PRIMARY0x0008) ||
256 !(ci->ci_flags & CPUF_CONST_TSC0x0040) ||
257 !(ci->ci_flags & CPUF_INVAR_TSC0x0100))
258 return;
259
260 /* Newer CPUs don't require recalibration */
261 if (tsc_frequency > 0) {
262 tsc_timecounter.tc_frequency = tsc_frequency;
263 tsc_timecounter.tc_quality = 2000;
264 } else {
265 tsc_recalibrate = 1;
266 tsc_frequency = cpufreq;
267 tsc_timecounter.tc_frequency = cpufreq;
268 calibrate_tsc_freq();
269 }
270
271 if (tsc_drift_observed > TSC_DRIFT_MAX250) {
272 printf("ERROR: %lld cycle TSC drift observed\n",
273 (long long)tsc_drift_observed);
274 tsc_timecounter.tc_quality = -1000;
275 tsc_timecounter.tc_user = 0;
276 tsc_is_invariant = 0;
277 }
278
279 tc_init(&tsc_timecounter);
280}
281
282/*
283 * Record drift (in clock cycles). Called during AP startup.
284 */
285void
286tsc_sync_drift(int64_t drift)
287{
288 if (drift < 0)
289 drift = -drift;
290 if (drift > tsc_drift_observed)
291 tsc_drift_observed = drift;
292}
293
294/*
295 * Called during startup of APs, by the boot processor. Interrupts
296 * are disabled on entry.
297 */
298void
299tsc_read_bp(struct cpu_info *ci, uint64_t *bptscp, uint64_t *aptscp)
300{
301 uint64_t bptsc;
302
303 if (atomic_swap_ptr(&tsc_sync_cpu, ci)_atomic_swap_ptr((&tsc_sync_cpu), (ci)) != NULL((void *)0))
304 panic("tsc_sync_bp: 1");
305
306 /* Flag it and read our TSC. */
307 atomic_setbits_intx86_atomic_setbits_u32(&ci->ci_flags, CPUF_SYNCTSC0x0800);
308 bptsc = (rdtsc_lfence() >> 1);
309
310 /* Wait for remote to complete, and read ours again. */
311 while ((ci->ci_flags & CPUF_SYNCTSC0x0800) != 0)
312 membar_consumer()do { __asm volatile("" ::: "memory"); } while (0);
313 bptsc += (rdtsc_lfence() >> 1);
314
315 /* Wait for the results to come in. */
316 while (tsc_sync_cpu == ci)
317 CPU_BUSY_CYCLE()__asm volatile("pause": : : "memory");
318 if (tsc_sync_cpu != NULL((void *)0))
319 panic("tsc_sync_bp: 2");
320
321 *bptscp = bptsc;
322 *aptscp = tsc_sync_val;
323}
324
325void
326tsc_sync_bp(struct cpu_info *ci)
327{
328 uint64_t bptsc, aptsc;
329
330 tsc_read_bp(ci, &bptsc, &aptsc); /* discarded - cache effects */
331 tsc_read_bp(ci, &bptsc, &aptsc);
332
333 /* Compute final value to adjust for skew. */
334 ci->ci_tsc_skew = bptsc - aptsc;
335}
336
337/*
338 * Called during startup of AP, by the AP itself. Interrupts are
339 * disabled on entry.
340 */
341void
342tsc_post_ap(struct cpu_info *ci)
343{
344 uint64_t tsc;
345
346 /* Wait for go-ahead from primary. */
347 while ((ci->ci_flags & CPUF_SYNCTSC0x0800) == 0)
348 membar_consumer()do { __asm volatile("" ::: "memory"); } while (0);
349 tsc = (rdtsc_lfence() >> 1);
350
351 /* Instruct primary to read its counter. */
352 atomic_clearbits_intx86_atomic_clearbits_u32(&ci->ci_flags, CPUF_SYNCTSC0x0800);
353 tsc += (rdtsc_lfence() >> 1);
354
355 /* Post result. Ensure the whole value goes out atomically. */
356 (void)atomic_swap_64(&tsc_sync_val, tsc)_atomic_swap_64((&tsc_sync_val), (tsc));
357
358 if (atomic_swap_ptr(&tsc_sync_cpu, NULL)_atomic_swap_ptr((&tsc_sync_cpu), (((void *)0))) != ci)
359 panic("tsc_sync_ap");
360}
361
/*
 * AP entry point for TSC synchronization: two rounds, matching
 * tsc_sync_bp() on the BP side; the first round is discarded there.
 */
void
tsc_sync_ap(struct cpu_info *ci)
{
	tsc_post_ap(ci);
	tsc_post_ap(ci);
}
368
369void
370tsc_delay(int usecs)
371{
372 uint64_t interval, start;
373
374 interval = (uint64_t)usecs * tsc_frequency / 1000000;
375 start = rdtsc_lfence();
376 while (rdtsc_lfence() - start < interval)
377 CPU_BUSY_CYCLE()__asm volatile("pause": : : "memory");
378}