| File: | arch/amd64/amd64/identcpu.c |
| Warning: | line 882, column 17: The result of the left shift is undefined because the left operand is negative |
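The flagged expression is "pkg_mask = -1 << core_bits" at line 882 of the listing below: in C, left-shifting a negative signed value is undefined behavior (C99/C11 6.5.7). The stand-alone sketch below illustrates the pattern and one defined-behavior alternative that builds the mask from an unsigned all-ones operand; the value of core_bits is hypothetical and this is an illustration only, not the fix committed to the tree.

/*
 * Illustration of the flagged shift.  "core_bits" and the resulting
 * mask value are made up; only the shift semantics matter here.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t core_bits = 3;
	uint32_t pkg_mask;

	/* Undefined behavior: the left operand of << is a negative int. */
	/* pkg_mask = -1 << core_bits; */

	/* Defined behavior: shift an unsigned all-ones value instead. */
	pkg_mask = ~0U << core_bits;		/* 0xfffffff8 */

	printf("pkg_mask = 0x%08x\n", pkg_mask);
	return 0;
}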
| 1 | /* $OpenBSD: identcpu.c,v 1.121 2021/11/02 23:30:15 mlarkin Exp $ */ | |||
| 2 | /* $NetBSD: identcpu.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $ */ | |||
| 3 | ||||
| 4 | /* | |||
| 5 | * Copyright (c) 2003 Wasabi Systems, Inc. | |||
| 6 | * All rights reserved. | |||
| 7 | * | |||
| 8 | * Written by Frank van der Linden for Wasabi Systems, Inc. | |||
| 9 | * | |||
| 10 | * Redistribution and use in source and binary forms, with or without | |||
| 11 | * modification, are permitted provided that the following conditions | |||
| 12 | * are met: | |||
| 13 | * 1. Redistributions of source code must retain the above copyright | |||
| 14 | * notice, this list of conditions and the following disclaimer. | |||
| 15 | * 2. Redistributions in binary form must reproduce the above copyright | |||
| 16 | * notice, this list of conditions and the following disclaimer in the | |||
| 17 | * documentation and/or other materials provided with the distribution. | |||
| 18 | * 3. All advertising materials mentioning features or use of this software | |||
| 19 | * must display the following acknowledgement: | |||
| 20 | * This product includes software developed for the NetBSD Project by | |||
| 21 | * Wasabi Systems, Inc. | |||
| 22 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | |||
| 23 | * or promote products derived from this software without specific prior | |||
| 24 | * written permission. | |||
| 25 | * | |||
| 26 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | |||
| 27 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |||
| 28 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |||
| 29 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | |||
| 30 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |||
| 31 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |||
| 32 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |||
| 33 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |||
| 34 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |||
| 35 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |||
| 36 | * POSSIBILITY OF SUCH DAMAGE. | |||
| 37 | */ | |||
| 38 | ||||
| 39 | #include <sys/param.h> | |||
| 40 | #include <sys/systm.h> | |||
| 41 | #include <sys/sysctl.h> | |||
| 42 | ||||
| 43 | #include "vmm.h" | |||
| 44 | #include "pvbus.h" | |||
| 45 | ||||
| 46 | #include <machine/cpu.h> | |||
| 47 | #include <machine/cpufunc.h> | |||
| 48 | ||||
| 49 | #if NPVBUS1 > 0 | |||
| 50 | #include <dev/pv/pvvar.h> | |||
| 51 | #endif | |||
| 52 | ||||
| 53 | void replacesmap(void); | |||
| 54 | void replacemeltdown(void); | |||
| 55 | uint64_t cpu_freq(struct cpu_info *); | |||
| 56 | void tsc_identify(struct cpu_info *); | |||
| 57 | void tsc_timecounter_init(struct cpu_info *, uint64_t); | |||
| 58 | #if NVMM1 > 0 | |||
| 59 | void cpu_check_vmm_cap(struct cpu_info *); | |||
| 60 | #endif /* NVMM > 0 */ | |||
| 61 | ||||
| 62 | /* sysctl wants this. */ | |||
| 63 | char cpu_model[48]; | |||
| 64 | int cpuspeed; | |||
| 65 | ||||
| 66 | int amd64_has_xcrypt; | |||
| 67 | #ifdef CRYPTO1 | |||
| 68 | int amd64_has_pclmul; | |||
| 69 | int amd64_has_aesni; | |||
| 70 | #endif | |||
| 71 | int has_rdrand; | |||
| 72 | int has_rdseed; | |||
| 73 | ||||
| 74 | const struct { | |||
| 75 | u_int32_t bit; | |||
| 76 | char str[12]; | |||
| 77 | } cpu_cpuid_features[] = { | |||
| 78 | { CPUID_FPU0x00000001, "FPU" }, | |||
| 79 | { CPUID_VME0x00000002, "VME" }, | |||
| 80 | { CPUID_DE0x00000004, "DE" }, | |||
| 81 | { CPUID_PSE0x00000008, "PSE" }, | |||
| 82 | { CPUID_TSC0x00000010, "TSC" }, | |||
| 83 | { CPUID_MSR0x00000020, "MSR" }, | |||
| 84 | { CPUID_PAE0x00000040, "PAE" }, | |||
| 85 | { CPUID_MCE0x00000080, "MCE" }, | |||
| 86 | { CPUID_CX80x00000100, "CX8" }, | |||
| 87 | { CPUID_APIC0x00000200, "APIC" }, | |||
| 88 | { CPUID_SEP0x00000800, "SEP" }, | |||
| 89 | { CPUID_MTRR0x00001000, "MTRR" }, | |||
| 90 | { CPUID_PGE0x00002000, "PGE" }, | |||
| 91 | { CPUID_MCA0x00004000, "MCA" }, | |||
| 92 | { CPUID_CMOV0x00008000, "CMOV" }, | |||
| 93 | { CPUID_PAT0x00010000, "PAT" }, | |||
| 94 | { CPUID_PSE360x00020000, "PSE36" }, | |||
| 95 | { CPUID_PSN0x00040000, "PSN" }, | |||
| 96 | { CPUID_CFLUSH0x00080000, "CFLUSH" }, | |||
| 97 | { CPUID_DS0x00200000, "DS" }, | |||
| 98 | { CPUID_ACPI0x00400000, "ACPI" }, | |||
| 99 | { CPUID_MMX0x00800000, "MMX" }, | |||
| 100 | { CPUID_FXSR0x01000000, "FXSR" }, | |||
| 101 | { CPUID_SSE0x02000000, "SSE" }, | |||
| 102 | { CPUID_SSE20x04000000, "SSE2" }, | |||
| 103 | { CPUID_SS0x08000000, "SS" }, | |||
| 104 | { CPUID_HTT0x10000000, "HTT" }, | |||
| 105 | { CPUID_TM0x20000000, "TM" }, | |||
| 106 | { CPUID_PBE0x80000000, "PBE" } | |||
| 107 | }, cpu_ecpuid_features[] = { | |||
| 108 | { CPUID_MPC0x00080000, "MPC" }, | |||
| 109 | { CPUID_NXE0x00100000, "NXE" }, | |||
| 110 | { CPUID_MMXX0x00400000, "MMXX" }, | |||
| 111 | { CPUID_FFXSR0x02000000, "FFXSR" }, | |||
| 112 | { CPUID_PAGE1GB0x04000000, "PAGE1GB" }, | |||
| 113 | { CPUID_RDTSCP0x08000000, "RDTSCP" }, | |||
| 114 | { CPUID_LONG0x20000000, "LONG" }, | |||
| 115 | { CPUID_3DNOW20x40000000, "3DNOW2" }, | |||
| 116 | { CPUID_3DNOW0x80000000, "3DNOW" } | |||
| 117 | }, cpu_cpuid_ecxfeatures[] = { | |||
| 118 | { CPUIDECX_SSE30x00000001, "SSE3" }, | |||
| 119 | { CPUIDECX_PCLMUL0x00000002, "PCLMUL" }, | |||
| 120 | { CPUIDECX_DTES640x00000004, "DTES64" }, | |||
| 121 | { CPUIDECX_MWAIT0x00000008, "MWAIT" }, | |||
| 122 | { CPUIDECX_DSCPL0x00000010, "DS-CPL" }, | |||
| 123 | { CPUIDECX_VMX0x00000020, "VMX" }, | |||
| 124 | { CPUIDECX_SMX0x00000040, "SMX" }, | |||
| 125 | { CPUIDECX_EST0x00000080, "EST" }, | |||
| 126 | { CPUIDECX_TM20x00000100, "TM2" }, | |||
| 127 | { CPUIDECX_SSSE30x00000200, "SSSE3" }, | |||
| 128 | { CPUIDECX_CNXTID0x00000400, "CNXT-ID" }, | |||
| 129 | { CPUIDECX_SDBG0x00000800, "SDBG" }, | |||
| 130 | { CPUIDECX_FMA30x00001000, "FMA3" }, | |||
| 131 | { CPUIDECX_CX160x00002000, "CX16" }, | |||
| 132 | { CPUIDECX_XTPR0x00004000, "xTPR" }, | |||
| 133 | { CPUIDECX_PDCM0x00008000, "PDCM" }, | |||
| 134 | { CPUIDECX_PCID0x00020000, "PCID" }, | |||
| 135 | { CPUIDECX_DCA0x00040000, "DCA" }, | |||
| 136 | { CPUIDECX_SSE410x00080000, "SSE4.1" }, | |||
| 137 | { CPUIDECX_SSE420x00100000, "SSE4.2" }, | |||
| 138 | { CPUIDECX_X2APIC0x00200000, "x2APIC" }, | |||
| 139 | { CPUIDECX_MOVBE0x00400000, "MOVBE" }, | |||
| 140 | { CPUIDECX_POPCNT0x00800000, "POPCNT" }, | |||
| 141 | { CPUIDECX_DEADLINE0x01000000, "DEADLINE" }, | |||
| 142 | { CPUIDECX_AES0x02000000, "AES" }, | |||
| 143 | { CPUIDECX_XSAVE0x04000000, "XSAVE" }, | |||
| 144 | { CPUIDECX_OSXSAVE0x08000000, "OSXSAVE" }, | |||
| 145 | { CPUIDECX_AVX0x10000000, "AVX" }, | |||
| 146 | { CPUIDECX_F16C0x20000000, "F16C" }, | |||
| 147 | { CPUIDECX_RDRAND0x40000000, "RDRAND" }, | |||
| 148 | { CPUIDECX_HV0x80000000, "HV" }, | |||
| 149 | }, cpu_ecpuid_ecxfeatures[] = { | |||
| 150 | { CPUIDECX_LAHF0x00000001, "LAHF" }, | |||
| 151 | { CPUIDECX_CMPLEG0x00000002, "CMPLEG" }, | |||
| 152 | { CPUIDECX_SVM0x00000004, "SVM" }, | |||
| 153 | { CPUIDECX_EAPICSP0x00000008, "EAPICSP"}, | |||
| 154 | { CPUIDECX_AMCR80x00000010, "AMCR8"}, | |||
| 155 | { CPUIDECX_ABM0x00000020, "ABM" }, | |||
| 156 | { CPUIDECX_SSE4A0x00000040, "SSE4A" }, | |||
| 157 | { CPUIDECX_MASSE0x00000080, "MASSE" }, | |||
| 158 | { CPUIDECX_3DNOWP0x00000100, "3DNOWP" }, | |||
| 159 | { CPUIDECX_OSVW0x00000200, "OSVW" }, | |||
| 160 | { CPUIDECX_IBS0x00000400, "IBS" }, | |||
| 161 | { CPUIDECX_XOP0x00000800, "XOP" }, | |||
| 162 | { CPUIDECX_SKINIT0x00001000, "SKINIT" }, | |||
| 163 | { CPUIDECX_LWP0x00008000, "WDT" }, | |||
| 164 | { CPUIDECX_FMA40x00010000, "FMA4" }, | |||
| 165 | { CPUIDECX_TCE0x00020000, "TCE" }, | |||
| 166 | { CPUIDECX_NODEID0x00080000, "NODEID" }, | |||
| 167 | { CPUIDECX_TBM0x00200000, "TBM" }, | |||
| 168 | { CPUIDECX_TOPEXT0x00400000, "TOPEXT" }, | |||
| 169 | { CPUIDECX_CPCTR0x00800000, "CPCTR" }, | |||
| 170 | { CPUIDECX_DBKP0x04000000, "DBKP" }, | |||
| 171 | { CPUIDECX_PERFTSC0x08000000, "PERFTSC" }, | |||
| 172 | { CPUIDECX_PCTRL30x10000000, "PCTRL3" }, | |||
| 173 | { CPUIDECX_MWAITX0x20000000, "MWAITX" }, | |||
| 174 | }, cpu_seff0_ebxfeatures[] = { | |||
| 175 | { SEFF0EBX_FSGSBASE0x00000001, "FSGSBASE" }, | |||
| 176 | { SEFF0EBX_TSC_ADJUST0x00000002, "TSC_ADJUST" }, | |||
| 177 | { SEFF0EBX_SGX0x00000004, "SGX" }, | |||
| 178 | { SEFF0EBX_BMI10x00000008, "BMI1" }, | |||
| 179 | { SEFF0EBX_HLE0x00000010, "HLE" }, | |||
| 180 | { SEFF0EBX_AVX20x00000020, "AVX2" }, | |||
| 181 | { SEFF0EBX_SMEP0x00000080, "SMEP" }, | |||
| 182 | { SEFF0EBX_BMI20x00000100, "BMI2" }, | |||
| 183 | { SEFF0EBX_ERMS0x00000200, "ERMS" }, | |||
| 184 | { SEFF0EBX_INVPCID0x00000400, "INVPCID" }, | |||
| 185 | { SEFF0EBX_RTM0x00000800, "RTM" }, | |||
| 186 | { SEFF0EBX_PQM0x00001000, "PQM" }, | |||
| 187 | { SEFF0EBX_MPX0x00004000, "MPX" }, | |||
| 188 | { SEFF0EBX_AVX512F0x00010000, "AVX512F" }, | |||
| 189 | { SEFF0EBX_AVX512DQ0x00020000, "AVX512DQ" }, | |||
| 190 | { SEFF0EBX_RDSEED0x00040000, "RDSEED" }, | |||
| 191 | { SEFF0EBX_ADX0x00080000, "ADX" }, | |||
| 192 | { SEFF0EBX_SMAP0x00100000, "SMAP" }, | |||
| 193 | { SEFF0EBX_AVX512IFMA0x00200000, "AVX512IFMA" }, | |||
| 194 | { SEFF0EBX_PCOMMIT0x00400000, "PCOMMIT" }, | |||
| 195 | { SEFF0EBX_CLFLUSHOPT0x00800000, "CLFLUSHOPT" }, | |||
| 196 | { SEFF0EBX_CLWB0x01000000, "CLWB" }, | |||
| 197 | { SEFF0EBX_PT0x02000000, "PT" }, | |||
| 198 | { SEFF0EBX_AVX512PF0x04000000, "AVX512PF" }, | |||
| 199 | { SEFF0EBX_AVX512ER0x08000000, "AVX512ER" }, | |||
| 200 | { SEFF0EBX_AVX512CD0x10000000, "AVX512CD" }, | |||
| 201 | { SEFF0EBX_SHA0x20000000, "SHA" }, | |||
| 202 | { SEFF0EBX_AVX512BW0x40000000, "AVX512BW" }, | |||
| 203 | { SEFF0EBX_AVX512VL0x80000000, "AVX512VL" }, | |||
| 204 | }, cpu_seff0_ecxfeatures[] = { | |||
| 205 | { SEFF0ECX_PREFETCHWT10x00000001, "PREFETCHWT1" }, | |||
| 206 | { SEFF0ECX_AVX512VBMI0x00000002, "AVX512VBMI" }, | |||
| 207 | { SEFF0ECX_UMIP0x00000004, "UMIP" }, | |||
| 208 | { SEFF0ECX_PKU0x00000008, "PKU" }, | |||
| 209 | }, cpu_seff0_edxfeatures[] = { | |||
| 210 | { SEFF0EDX_AVX512_4FNNIW0x00000004, "AVX512FNNIW" }, | |||
| 211 | { SEFF0EDX_AVX512_4FMAPS0x00000008, "AVX512FMAPS" }, | |||
| 212 | { SEFF0EDX_SRBDS_CTRL0x00000200, "SRBDS_CTRL" }, | |||
| 213 | { SEFF0EDX_MD_CLEAR0x00000400, "MD_CLEAR" }, | |||
| 214 | { SEFF0EDX_TSXFA0x00002000, "TSXFA" }, | |||
| 215 | { SEFF0EDX_IBRS0x04000000, "IBRS,IBPB" }, | |||
| 216 | { SEFF0EDX_STIBP0x08000000, "STIBP" }, | |||
| 217 | { SEFF0EDX_L1DF0x10000000, "L1DF" }, | |||
| 218 | /* SEFF0EDX_ARCH_CAP (not printed) */ | |||
| 219 | { SEFF0EDX_SSBD0x80000000, "SSBD" }, | |||
| 220 | }, cpu_tpm_eaxfeatures[] = { | |||
| 221 | { TPM_SENSOR0x00000001, "SENSOR" }, | |||
| 222 | { TPM_ARAT0x00000004, "ARAT" }, | |||
| 223 | }, cpu_cpuid_perf_eax[] = { | |||
| 224 | { CPUIDEAX_VERID0x000000ff, "PERF" }, | |||
| 225 | }, cpu_cpuid_apmi_edx[] = { | |||
| 226 | { CPUIDEDX_ITSC(1 << 8), "ITSC" }, | |||
| 227 | }, cpu_amdspec_ebxfeatures[] = { | |||
| 228 | { CPUIDEBX_IBPB(1ULL << 12), "IBPB" }, | |||
| 229 | { CPUIDEBX_IBRS(1ULL << 14), "IBRS" }, | |||
| 230 | { CPUIDEBX_STIBP(1ULL << 15), "STIBP" }, | |||
| 231 | { CPUIDEBX_SSBD(1ULL << 24), "SSBD" }, | |||
| 232 | { CPUIDEBX_VIRT_SSBD(1ULL << 25), "VIRTSSBD" }, | |||
| 233 | { CPUIDEBX_SSBD_NOTREQ(1ULL << 26), "SSBDNR" }, | |||
| 234 | }, cpu_xsave_extfeatures[] = { | |||
| 235 | { XSAVE_XSAVEOPT0x1UL, "XSAVEOPT" }, | |||
| 236 | { XSAVE_XSAVEC0x2UL, "XSAVEC" }, | |||
| 237 | { XSAVE_XGETBV10x4UL, "XGETBV1" }, | |||
| 238 | { XSAVE_XSAVES0x8UL, "XSAVES" }, | |||
| 239 | }; | |||
| 240 | ||||
| 241 | int | |||
| 242 | cpu_amd64speed(int *freq) | |||
| 243 | { | |||
| 244 | *freq = cpuspeed; | |||
| 245 | return (0); | |||
| 246 | } | |||
| 247 | ||||
| 248 | #ifndef SMALL_KERNEL | |||
| 249 | void intelcore_update_sensor(void *args); | |||
| 250 | /* | |||
| 251 | * Temperature read on the CPU is relative to the maximum | |||
| 252 | * temperature supported by the CPU, Tj(Max). | |||
| 253 | * Refer to: | |||
| 254 | * 64-ia-32-architectures-software-developer-vol-3c-part-3-manual.pdf | |||
| 255 | * Section 35 and | |||
| 256 | * http://www.intel.com/content/dam/www/public/us/en/documents/ | |||
| 257 | * white-papers/cpu-monitoring-dts-peci-paper.pdf | |||
| 258 | * | |||
| 259 | * The temperature on Intel CPUs can be between 70 and 105 degC, since | |||
| 260 | * Westmere we can read the TJmax from the die. For older CPUs we have | |||
| 261 | * to guess or use undocumented MSRs. Then we subtract the temperature | |||
| 262 | * portion of thermal status from max to get current temperature. | |||
| 263 | */ | |||
| 264 | void | |||
| 265 | intelcore_update_sensor(void *args) | |||
| 266 | { | |||
| 267 | struct cpu_info *ci = (struct cpu_info *) args; | |||
| 268 | u_int64_t msr; | |||
| 269 | int max = 100; | |||
| 270 | ||||
| 271 | /* Only some Core family chips have MSR_TEMPERATURE_TARGET. */ | |||
| 272 | if (ci->ci_model == 0x0e && | |||
| 273 | (rdmsr(MSR_TEMPERATURE_TARGET_UNDOCUMENTED0x0ee) & | |||
| 274 | MSR_TEMPERATURE_TARGET_LOW_BIT_UNDOCUMENTED0x40000000)) | |||
| 275 | max = 85; | |||
| 276 | ||||
| 277 | /* | |||
| 278 | * Newer CPUs can tell you what their max temperature is. | |||
| 279 | * See: '64-ia-32-architectures-software-developer- | |||
| 280 | * vol-3c-part-3-manual.pdf' | |||
| 281 | */ | |||
| 282 | if (ci->ci_model > 0x17 && ci->ci_model != 0x1c && | |||
| 283 | ci->ci_model != 0x26 && ci->ci_model != 0x27 && | |||
| 284 | ci->ci_model != 0x35 && ci->ci_model != 0x36) | |||
| 285 | max = MSR_TEMPERATURE_TARGET_TJMAX( | |||
| 286 | rdmsr(MSR_TEMPERATURE_TARGET))(((rdmsr(0x1a2)) >> 16) & 0xff); | |||
| 287 | ||||
| 288 | msr = rdmsr(MSR_THERM_STATUS0x19c); | |||
| 289 | if (msr & MSR_THERM_STATUS_VALID_BIT0x80000000) { | |||
| 290 | ci->ci_sensor.value = max - MSR_THERM_STATUS_TEMP(msr)((msr >> 16) & 0x7f); | |||
| 291 | /* micro degrees */ | |||
| 292 | ci->ci_sensor.value *= 1000000; | |||
| 293 | /* kelvin */ | |||
| 294 | ci->ci_sensor.value += 273150000; | |||
| 295 | ci->ci_sensor.flags &= ~SENSOR_FINVALID0x0001; | |||
| 296 | } else { | |||
| 297 | ci->ci_sensor.value = 0; | |||
| 298 | ci->ci_sensor.flags |= SENSOR_FINVALID0x0001; | |||
| 299 | } | |||
| 300 | } | |||
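To make the conversion in intelcore_update_sensor() above concrete, here is a small stand-alone sketch of the same arithmetic with made-up register values (a TjMax of 100 degC and a digital readout of 38); only the computation mirrors the function, the numbers are hypothetical.

/* Worked example of the TjMax-relative temperature conversion above. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int max = 100;				/* hypothetical TjMax, degC */
	uint64_t therm_status = 38ULL << 16;	/* hypothetical MSR image */
	int64_t value;

	value = max - ((therm_status >> 16) & 0x7f);	/* 62 degC */
	value *= 1000000;			/* micro degrees C */
	value += 273150000;			/* micro kelvin: 335150000 */

	printf("%lld uK (%lld degC)\n", (long long)value,
	    (long long)(value - 273150000) / 1000000);
	return 0;
}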
| 301 | ||||
| 302 | #endif | |||
| 303 | ||||
| 304 | void (*setperf_setup)(struct cpu_info *); | |||
| 305 | ||||
| 306 | void via_nano_setup(struct cpu_info *ci); | |||
| 307 | ||||
| 308 | void cpu_topology(struct cpu_info *ci); | |||
| 309 | ||||
| 310 | void | |||
| 311 | via_nano_setup(struct cpu_info *ci) | |||
| 312 | { | |||
| 313 | u_int32_t regs[4], val; | |||
| 314 | u_int64_t msreg; | |||
| 315 | int model = (ci->ci_signature >> 4) & 15; | |||
| 316 | ||||
| 317 | if (model >= 9) { | |||
| 318 | CPUID(0xC0000000, regs[0], regs[1], regs[2], regs[3])__asm volatile("cpuid" : "=a" (regs[0]), "=b" (regs[1]), "=c" (regs[2]), "=d" (regs[3]) : "a" (0xC0000000)); | |||
| 319 | val = regs[0]; | |||
| 320 | if (val >= 0xC0000001) { | |||
| 321 | CPUID(0xC0000001, regs[0], regs[1], regs[2], regs[3])__asm volatile("cpuid" : "=a" (regs[0]), "=b" (regs[1]), "=c" (regs[2]), "=d" (regs[3]) : "a" (0xC0000001)); | |||
| 322 | val = regs[3]; | |||
| 323 | } else | |||
| 324 | val = 0; | |||
| 325 | ||||
| 326 | if (val & (C3_CPUID_HAS_RNG0x000004 | C3_CPUID_HAS_ACE0x000040)) | |||
| 327 | printf("%s:", ci->ci_dev->dv_xname); | |||
| 328 | ||||
| 329 | /* Enable RNG if present and disabled */ | |||
| 330 | if (val & C3_CPUID_HAS_RNG0x000004) { | |||
| 331 | extern int viac3_rnd_present; | |||
| 332 | ||||
| 333 | if (!(val & C3_CPUID_DO_RNG0x000008)) { | |||
| 334 | msreg = rdmsr(0x110B); | |||
| 335 | msreg |= 0x40; | |||
| 336 | wrmsr(0x110B, msreg); | |||
| 337 | } | |||
| 338 | viac3_rnd_present = 1; | |||
| 339 | printf(" RNG"); | |||
| 340 | } | |||
| 341 | ||||
| 342 | /* Enable AES engine if present and disabled */ | |||
| 343 | if (val & C3_CPUID_HAS_ACE0x000040) { | |||
| 344 | #ifdef CRYPTO1 | |||
| 345 | if (!(val & C3_CPUID_DO_ACE0x000080)) { | |||
| 346 | msreg = rdmsr(0x1107); | |||
| 347 | msreg |= (0x01 << 28); | |||
| 348 | wrmsr(0x1107, msreg); | |||
| 349 | } | |||
| 350 | amd64_has_xcrypt |= C3_HAS_AES1; | |||
| 351 | #endif /* CRYPTO */ | |||
| 352 | printf(" AES"); | |||
| 353 | } | |||
| 354 | ||||
| 355 | /* Enable ACE2 engine if present and disabled */ | |||
| 356 | if (val & C3_CPUID_HAS_ACE20x000100) { | |||
| 357 | #ifdef CRYPTO1 | |||
| 358 | if (!(val & C3_CPUID_DO_ACE20x000200)) { | |||
| 359 | msreg = rdmsr(0x1107); | |||
| 360 | msreg |= (0x01 << 28); | |||
| 361 | wrmsr(0x1107, msreg); | |||
| 362 | } | |||
| 363 | amd64_has_xcrypt |= C3_HAS_AESCTR8; | |||
| 364 | #endif /* CRYPTO */ | |||
| 365 | printf(" AES-CTR"); | |||
| 366 | } | |||
| 367 | ||||
| 368 | /* Enable SHA engine if present and disabled */ | |||
| 369 | if (val & C3_CPUID_HAS_PHE0x000400) { | |||
| 370 | #ifdef CRYPTO1 | |||
| 371 | if (!(val & C3_CPUID_DO_PHE0x000800)) { | |||
| 372 | msreg = rdmsr(0x1107); | |||
| 373 | msreg |= (0x01 << 28/**/); | |||
| 374 | wrmsr(0x1107, msreg); | |||
| 375 | } | |||
| 376 | amd64_has_xcrypt |= C3_HAS_SHA2; | |||
| 377 | #endif /* CRYPTO */ | |||
| 378 | printf(" SHA1 SHA256"); | |||
| 379 | } | |||
| 380 | ||||
| 381 | /* Enable MM engine if present and disabled */ | |||
| 382 | if (val & C3_CPUID_HAS_PMM0x001000) { | |||
| 383 | #ifdef CRYPTO1 | |||
| 384 | if (!(val & C3_CPUID_DO_PMM0x002000)) { | |||
| 385 | msreg = rdmsr(0x1107); | |||
| 386 | msreg |= (0x01 << 28/**/); | |||
| 387 | wrmsr(0x1107, msreg); | |||
| 388 | } | |||
| 389 | amd64_has_xcrypt |= C3_HAS_MM4; | |||
| 390 | #endif /* CRYPTO */ | |||
| 391 | printf(" RSA"); | |||
| 392 | } | |||
| 393 | ||||
| 394 | printf("\n"); | |||
| 395 | } | |||
| 396 | } | |||
| 397 | ||||
| 398 | #ifndef SMALL_KERNEL | |||
| 399 | void via_update_sensor(void *args); | |||
| 400 | void | |||
| 401 | via_update_sensor(void *args) | |||
| 402 | { | |||
| 403 | struct cpu_info *ci = (struct cpu_info *) args; | |||
| 404 | u_int64_t msr; | |||
| 405 | ||||
| 406 | msr = rdmsr(MSR_CENT_TMTEMPERATURE0x1423); | |||
| 407 | ci->ci_sensor.value = (msr & 0xffffff); | |||
| 408 | /* micro degrees */ | |||
| 409 | ci->ci_sensor.value *= 1000000; | |||
| 410 | ci->ci_sensor.value += 273150000; | |||
| 411 | ci->ci_sensor.flags &= ~SENSOR_FINVALID0x0001; | |||
| 412 | } | |||
| 413 | #endif | |||
| 414 | ||||
| 415 | uint64_t | |||
| 416 | cpu_freq_ctr(struct cpu_info *ci) | |||
| 417 | { | |||
| 418 | uint64_t count, last_count, msr; | |||
| 419 | ||||
| 420 | if ((ci->ci_flags & CPUF_CONST_TSC0x0040) == 0 || | |||
| 421 | (cpu_perf_eax & CPUIDEAX_VERID0x000000ff) <= 1 || | |||
| 422 | CPUIDEDX_NUM_FC(cpu_perf_edx)(((cpu_perf_edx) >> 0) & 0x0000001f) <= 1) | |||
| 423 | return (0); | |||
| 424 | ||||
| 425 | msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL0x38d); | |||
| 426 | if (msr & MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_MASK)((0x3) << (4 * (1)))) { | |||
| 427 | /* some hypervisor is dicking us around */ | |||
| 428 | return (0); | |||
| 429 | } | |||
| 430 | ||||
| 431 | msr |= MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_1)((0x1) << (4 * (1))); | |||
| 432 | wrmsr(MSR_PERF_FIXED_CTR_CTRL0x38d, msr); | |||
| 433 | ||||
| 434 | msr = rdmsr(MSR_PERF_GLOBAL_CTRL0x38f) | MSR_PERF_GLOBAL_CTR1_EN(1ULL << 33); | |||
| 435 | wrmsr(MSR_PERF_GLOBAL_CTRL0x38f, msr); | |||
| 436 | ||||
| 437 | last_count = rdmsr(MSR_PERF_FIXED_CTR10x30a); | |||
| 438 | delay(100000)(*delay_func)(100000); | |||
| 439 | count = rdmsr(MSR_PERF_FIXED_CTR10x30a); | |||
| 440 | ||||
| 441 | msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL0x38d); | |||
| 442 | msr &= MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_MASK)((0x3) << (4 * (1))); | |||
| 443 | wrmsr(MSR_PERF_FIXED_CTR_CTRL0x38d, msr); | |||
| 444 | ||||
| 445 | msr = rdmsr(MSR_PERF_GLOBAL_CTRL0x38f); | |||
| 446 | msr &= ~MSR_PERF_GLOBAL_CTR1_EN(1ULL << 33); | |||
| 447 | wrmsr(MSR_PERF_GLOBAL_CTRL0x38f, msr); | |||
| 448 | ||||
| 449 | return ((count - last_count) * 10); | |||
| 450 | } | |||
| 451 | ||||
| 452 | uint64_t | |||
| 453 | cpu_freq(struct cpu_info *ci) | |||
| 454 | { | |||
| 455 | uint64_t last_count, count; | |||
| 456 | ||||
| 457 | count = cpu_freq_ctr(ci); | |||
| 458 | if (count != 0) | |||
| 459 | return (count); | |||
| 460 | ||||
| 461 | last_count = rdtsc(); | |||
| 462 | delay(100000)(*delay_func)(100000); | |||
| 463 | count = rdtsc(); | |||
| 464 | ||||
| 465 | return ((count - last_count) * 10); | |||
| 466 | } | |||
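The frequency estimate above is just a counter delta taken around a fixed 100 ms delay, scaled by 10 to get ticks per second. The sketch below reproduces that arithmetic with a hypothetical delta in place of rdtsc()/delay(), together with the rounding identifycpu() applies further down when printing MHz.

/*
 * Sketch of the cpu_freq()/identifycpu() arithmetic with a made-up
 * counter delta; the kernel samples the TSC (or a fixed performance
 * counter) around a 100 ms delay instead.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t last_count = 1000000000ULL;	/* hypothetical first sample */
	uint64_t count = 1239499500ULL;		/* hypothetical, 100 ms later */
	uint64_t freq;

	freq = (count - last_count) * 10;	/* Hz */

	/* Same rounding as the identifycpu() printf below: 2394.99 MHz */
	printf("%llu.%02llu MHz\n",
	    (unsigned long long)((freq + 4999) / 1000000),
	    (unsigned long long)(((freq + 4999) / 10000) % 100));
	return 0;
}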
| 467 | ||||
| 468 | void | |||
| 469 | identifycpu(struct cpu_info *ci) | |||
| 470 | { | |||
| 471 | uint64_t freq = 0; | |||
| 472 | u_int32_t dummy, val; | |||
| 473 | char mycpu_model[48]; | |||
| 474 | int i; | |||
| 475 | char *brandstr_from, *brandstr_to; | |||
| 476 | int skipspace; | |||
| 477 | ||||
| 478 | CPUID(1, ci->ci_signature, val, dummy, ci->ci_feature_flags)__asm volatile("cpuid" : "=a" (ci->ci_signature), "=b" (val ), "=c" (dummy), "=d" (ci->ci_feature_flags) : "a" (1)); | |||
| 479 | CPUID(0x80000000, ci->ci_pnfeatset, dummy, dummy, dummy)__asm volatile("cpuid" : "=a" (ci->ci_pnfeatset), "=b" (dummy ), "=c" (dummy), "=d" (dummy) : "a" (0x80000000)); | |||
| 480 | if (ci->ci_pnfeatset >= 0x80000001) { | |||
| 481 | CPUID(0x80000001, ci->ci_efeature_eax, dummy,__asm volatile("cpuid" : "=a" (ci->ci_efeature_eax), "=b" ( dummy), "=c" (ci->ci_efeature_ecx), "=d" (ci->ci_feature_eflags ) : "a" (0x80000001)) | |||
| 482 | ci->ci_efeature_ecx, ci->ci_feature_eflags)__asm volatile("cpuid" : "=a" (ci->ci_efeature_eax), "=b" ( dummy), "=c" (ci->ci_efeature_ecx), "=d" (ci->ci_feature_eflags ) : "a" (0x80000001)); | |||
| 483 | /* Other bits may clash */ | |||
| 484 | ci->ci_feature_flags |= (ci->ci_feature_eflags & CPUID_NXE0x00100000); | |||
| 485 | if (CPU_IS_PRIMARY(ci)((ci)->ci_flags & 0x0008)) | |||
| 486 | ecpu_ecxfeature = ci->ci_efeature_ecx; | |||
| 487 | /* Let cpu_feature be the common bits */ | |||
| 488 | cpu_feature &= ci->ci_feature_flags; | |||
| 489 | } | |||
| 490 | ||||
| 491 | CPUID(0x80000002, ci->ci_brand[0],__asm volatile("cpuid" : "=a" (ci->ci_brand[0]), "=b" (ci-> ci_brand[1]), "=c" (ci->ci_brand[2]), "=d" (ci->ci_brand [3]) : "a" (0x80000002)) | |||
| 492 | ci->ci_brand[1], ci->ci_brand[2], ci->ci_brand[3])__asm volatile("cpuid" : "=a" (ci->ci_brand[0]), "=b" (ci-> ci_brand[1]), "=c" (ci->ci_brand[2]), "=d" (ci->ci_brand [3]) : "a" (0x80000002)); | |||
| 493 | CPUID(0x80000003, ci->ci_brand[4],__asm volatile("cpuid" : "=a" (ci->ci_brand[4]), "=b" (ci-> ci_brand[5]), "=c" (ci->ci_brand[6]), "=d" (ci->ci_brand [7]) : "a" (0x80000003)) | |||
| 494 | ci->ci_brand[5], ci->ci_brand[6], ci->ci_brand[7])__asm volatile("cpuid" : "=a" (ci->ci_brand[4]), "=b" (ci-> ci_brand[5]), "=c" (ci->ci_brand[6]), "=d" (ci->ci_brand [7]) : "a" (0x80000003)); | |||
| 495 | CPUID(0x80000004, ci->ci_brand[8],__asm volatile("cpuid" : "=a" (ci->ci_brand[8]), "=b" (ci-> ci_brand[9]), "=c" (ci->ci_brand[10]), "=d" (ci->ci_brand [11]) : "a" (0x80000004)) | |||
| 496 | ci->ci_brand[9], ci->ci_brand[10], ci->ci_brand[11])__asm volatile("cpuid" : "=a" (ci->ci_brand[8]), "=b" (ci-> ci_brand[9]), "=c" (ci->ci_brand[10]), "=d" (ci->ci_brand [11]) : "a" (0x80000004)); | |||
| 497 | strlcpy(mycpu_model, (char *)ci->ci_brand, sizeof(mycpu_model)); | |||
| 498 | ||||
| 499 | /* Remove leading, trailing and duplicated spaces from mycpu_model */ | |||
| 500 | brandstr_from = brandstr_to = mycpu_model; | |||
| 501 | skipspace = 1; | |||
| 502 | while (*brandstr_from != '\0') { | |||
| 503 | if (!skipspace || *brandstr_from != ' ') { | |||
| 504 | skipspace = 0; | |||
| 505 | *(brandstr_to++) = *brandstr_from; | |||
| 506 | } | |||
| 507 | if (*brandstr_from == ' ') | |||
| 508 | skipspace = 1; | |||
| 509 | brandstr_from++; | |||
| 510 | } | |||
| 511 | if (skipspace && brandstr_to > mycpu_model) | |||
| 512 | brandstr_to--; | |||
| 513 | *brandstr_to = '\0'; | |||
| 514 | ||||
| 515 | if (mycpu_model[0] == 0) | |||
| 516 | strlcpy(mycpu_model, "Opteron or Athlon 64", | |||
| 517 | sizeof(mycpu_model)); | |||
| 518 | ||||
| 519 | /* If primary cpu, fill in the global cpu_model used by sysctl */ | |||
| 520 | if (CPU_IS_PRIMARY(ci)((ci)->ci_flags & 0x0008)) | |||
| 521 | strlcpy(cpu_model, mycpu_model, sizeof(cpu_model)); | |||
| 522 | ||||
| 523 | ci->ci_family = (ci->ci_signature >> 8) & 0x0f; | |||
| 524 | ci->ci_model = (ci->ci_signature >> 4) & 0x0f; | |||
| 525 | if (ci->ci_family == 0x6 || ci->ci_family == 0xf) { | |||
| 526 | ci->ci_family += (ci->ci_signature >> 20) & 0xff; | |||
| 527 | ci->ci_model += ((ci->ci_signature >> 16) & 0x0f) << 4; | |||
| 528 | } | |||
| 529 | ||||
| 530 | #if NPVBUS1 > 0 | |||
| 531 | /* Detect hypervisors early, attach the paravirtual bus later */ | |||
| 532 | if (CPU_IS_PRIMARY(ci)((ci)->ci_flags & 0x0008) && cpu_ecxfeature & CPUIDECX_HV0x80000000) | |||
| 533 | pvbus_identify(); | |||
| 534 | #endif | |||
| 535 | ||||
| 536 | if (ci->ci_feature_flags && ci->ci_feature_flags & CPUID_TSC0x00000010) { | |||
| 537 | /* Has TSC, check if it's constant */ | |||
| 538 | if (!strcmp(cpu_vendor, "GenuineIntel")) { | |||
| 539 | if ((ci->ci_family == 0x0f && ci->ci_model >= 0x03) || | |||
| 540 | (ci->ci_family == 0x06 && ci->ci_model >= 0x0e)) { | |||
| 541 | ci->ci_flags |= CPUF_CONST_TSC0x0040; | |||
| 542 | } | |||
| 543 | } else if (!strcmp(cpu_vendor, "CentaurHauls")) { | |||
| 544 | /* VIA */ | |||
| 545 | if (ci->ci_model >= 0x0f) { | |||
| 546 | ci->ci_flags |= CPUF_CONST_TSC0x0040; | |||
| 547 | } | |||
| 548 | } else if (!strcmp(cpu_vendor, "AuthenticAMD")) { | |||
| 549 | if (cpu_apmi_edx & CPUIDEDX_ITSC(1 << 8)) { | |||
| 550 | /* Invariant TSC indicates constant TSC on | |||
| 551 | * AMD. | |||
| 552 | */ | |||
| 553 | ci->ci_flags |= CPUF_CONST_TSC0x0040; | |||
| 554 | } | |||
| 555 | } | |||
| 556 | ||||
| 557 | /* Check if it's an invariant TSC */ | |||
| 558 | if (cpu_apmi_edx & CPUIDEDX_ITSC(1 << 8)) | |||
| 559 | ci->ci_flags |= CPUF_INVAR_TSC0x0100; | |||
| 560 | ||||
| 561 | tsc_identify(ci); | |||
| 562 | } | |||
| 563 | ||||
| 564 | freq = cpu_freq(ci); | |||
| 565 | ||||
| 566 | amd_cpu_cacheinfo(ci); | |||
| 567 | ||||
| 568 | printf("%s: %s", ci->ci_dev->dv_xname, mycpu_model); | |||
| 569 | ||||
| 570 | if (freq != 0) | |||
| 571 | printf(", %llu.%02llu MHz", (freq + 4999) / 1000000, | |||
| 572 | ((freq + 4999) / 10000) % 100); | |||
| 573 | ||||
| 574 | if (CPU_IS_PRIMARY(ci)((ci)->ci_flags & 0x0008)) { | |||
| 575 | cpuspeed = (freq + 4999) / 1000000; | |||
| 576 | cpu_cpuspeed = cpu_amd64speed; | |||
| 577 | } | |||
| 578 | ||||
| 579 | printf(", %02x-%02x-%02x", ci->ci_family, ci->ci_model, | |||
| 580 | ci->ci_signature & 0x0f); | |||
| 581 | ||||
| 582 | printf("\n%s: ", ci->ci_dev->dv_xname); | |||
| 583 | ||||
| 584 | for (i = 0; i < nitems(cpu_cpuid_features)(sizeof((cpu_cpuid_features)) / sizeof((cpu_cpuid_features)[0 ])); i++) | |||
| 585 | if (ci->ci_feature_flags & cpu_cpuid_features[i].bit) | |||
| 586 | printf("%s%s", i? "," : "", cpu_cpuid_features[i].str); | |||
| 587 | for (i = 0; i < nitems(cpu_cpuid_ecxfeatures)(sizeof((cpu_cpuid_ecxfeatures)) / sizeof((cpu_cpuid_ecxfeatures )[0])); i++) | |||
| 588 | if (cpu_ecxfeature & cpu_cpuid_ecxfeatures[i].bit) | |||
| 589 | printf(",%s", cpu_cpuid_ecxfeatures[i].str); | |||
| 590 | for (i = 0; i < nitems(cpu_ecpuid_features)(sizeof((cpu_ecpuid_features)) / sizeof((cpu_ecpuid_features) [0])); i++) | |||
| 591 | if (ci->ci_feature_eflags & cpu_ecpuid_features[i].bit) | |||
| 592 | printf(",%s", cpu_ecpuid_features[i].str); | |||
| 593 | for (i = 0; i < nitems(cpu_ecpuid_ecxfeatures)(sizeof((cpu_ecpuid_ecxfeatures)) / sizeof((cpu_ecpuid_ecxfeatures )[0])); i++) | |||
| 594 | if (ecpu_ecxfeature & cpu_ecpuid_ecxfeatures[i].bit) | |||
| 595 | printf(",%s", cpu_ecpuid_ecxfeatures[i].str); | |||
| 596 | for (i = 0; i < nitems(cpu_cpuid_perf_eax)(sizeof((cpu_cpuid_perf_eax)) / sizeof((cpu_cpuid_perf_eax)[0 ])); i++) | |||
| 597 | if (cpu_perf_eax & cpu_cpuid_perf_eax[i].bit) | |||
| 598 | printf(",%s", cpu_cpuid_perf_eax[i].str); | |||
| 599 | for (i = 0; i < nitems(cpu_cpuid_apmi_edx)(sizeof((cpu_cpuid_apmi_edx)) / sizeof((cpu_cpuid_apmi_edx)[0 ])); i++) | |||
| 600 | if (cpu_apmi_edx & cpu_cpuid_apmi_edx[i].bit) | |||
| 601 | printf(",%s", cpu_cpuid_apmi_edx[i].str); | |||
| 602 | ||||
| 603 | if (cpuid_level >= 0x07) { | |||
| 604 | /* "Structured Extended Feature Flags" */ | |||
| 605 | CPUID_LEAF(0x7, 0, dummy, ci->ci_feature_sefflags_ebx,__asm volatile("cpuid" : "=a" (dummy), "=b" (ci->ci_feature_sefflags_ebx ), "=c" (ci->ci_feature_sefflags_ecx), "=d" (ci->ci_feature_sefflags_edx ) : "a" (0x7), "c" (0)) | |||
| 606 | ci->ci_feature_sefflags_ecx, ci->ci_feature_sefflags_edx)__asm volatile("cpuid" : "=a" (dummy), "=b" (ci->ci_feature_sefflags_ebx ), "=c" (ci->ci_feature_sefflags_ecx), "=d" (ci->ci_feature_sefflags_edx ) : "a" (0x7), "c" (0)); | |||
| 607 | for (i = 0; i < nitems(cpu_seff0_ebxfeatures)(sizeof((cpu_seff0_ebxfeatures)) / sizeof((cpu_seff0_ebxfeatures )[0])); i++) | |||
| 608 | if (ci->ci_feature_sefflags_ebx & | |||
| 609 | cpu_seff0_ebxfeatures[i].bit) | |||
| 610 | printf(",%s", cpu_seff0_ebxfeatures[i].str); | |||
| 611 | for (i = 0; i < nitems(cpu_seff0_ecxfeatures)(sizeof((cpu_seff0_ecxfeatures)) / sizeof((cpu_seff0_ecxfeatures )[0])); i++) | |||
| 612 | if (ci->ci_feature_sefflags_ecx & | |||
| 613 | cpu_seff0_ecxfeatures[i].bit) | |||
| 614 | printf(",%s", cpu_seff0_ecxfeatures[i].str); | |||
| 615 | for (i = 0; i < nitems(cpu_seff0_edxfeatures)(sizeof((cpu_seff0_edxfeatures)) / sizeof((cpu_seff0_edxfeatures )[0])); i++) | |||
| 616 | if (ci->ci_feature_sefflags_edx & | |||
| 617 | cpu_seff0_edxfeatures[i].bit) | |||
| 618 | printf(",%s", cpu_seff0_edxfeatures[i].str); | |||
| 619 | } | |||
| 620 | ||||
| 621 | if (!strcmp(cpu_vendor, "GenuineIntel") && cpuid_level >= 0x06) { | |||
| 622 | CPUID(0x06, ci->ci_feature_tpmflags, dummy, dummy, dummy)__asm volatile("cpuid" : "=a" (ci->ci_feature_tpmflags), "=b" (dummy), "=c" (dummy), "=d" (dummy) : "a" (0x06)); | |||
| 623 | for (i = 0; i < nitems(cpu_tpm_eaxfeatures)(sizeof((cpu_tpm_eaxfeatures)) / sizeof((cpu_tpm_eaxfeatures) [0])); i++) | |||
| 624 | if (ci->ci_feature_tpmflags & | |||
| 625 | cpu_tpm_eaxfeatures[i].bit) | |||
| 626 | printf(",%s", cpu_tpm_eaxfeatures[i].str); | |||
| 627 | } else if (!strcmp(cpu_vendor, "AuthenticAMD")) { | |||
| 628 | if (ci->ci_family >= 0x12) | |||
| 629 | ci->ci_feature_tpmflags |= TPM_ARAT0x00000004; | |||
| 630 | } | |||
| 631 | ||||
| 632 | /* AMD speculation control features */ | |||
| 633 | if (!strcmp(cpu_vendor, "AuthenticAMD")) { | |||
| 634 | if (ci->ci_pnfeatset >= 0x80000008) { | |||
| 635 | CPUID(0x80000008, dummy, ci->ci_feature_amdspec_ebx,__asm volatile("cpuid" : "=a" (dummy), "=b" (ci->ci_feature_amdspec_ebx ), "=c" (dummy), "=d" (dummy) : "a" (0x80000008)) | |||
| 636 | dummy, dummy)__asm volatile("cpuid" : "=a" (dummy), "=b" (ci->ci_feature_amdspec_ebx ), "=c" (dummy), "=d" (dummy) : "a" (0x80000008)); | |||
| 637 | for (i = 0; i < nitems(cpu_amdspec_ebxfeatures)(sizeof((cpu_amdspec_ebxfeatures)) / sizeof((cpu_amdspec_ebxfeatures )[0])); i++) | |||
| 638 | if (ci->ci_feature_amdspec_ebx & | |||
| 639 | cpu_amdspec_ebxfeatures[i].bit) | |||
| 640 | printf(",%s", | |||
| 641 | cpu_amdspec_ebxfeatures[i].str); | |||
| 642 | } | |||
| 643 | } | |||
| 644 | ||||
| 645 | /* xsave subfeatures */ | |||
| 646 | if (cpuid_level >= 0xd) { | |||
| 647 | CPUID_LEAF(0xd, 1, val, dummy, dummy, dummy)__asm volatile("cpuid" : "=a" (val), "=b" (dummy), "=c" (dummy ), "=d" (dummy) : "a" (0xd), "c" (1)); | |||
| 648 | for (i = 0; i < nitems(cpu_xsave_extfeatures)(sizeof((cpu_xsave_extfeatures)) / sizeof((cpu_xsave_extfeatures )[0])); i++) | |||
| 649 | if (val & cpu_xsave_extfeatures[i].bit) | |||
| 650 | printf(",%s", cpu_xsave_extfeatures[i].str); | |||
| 651 | } | |||
| 652 | ||||
| 653 | if (cpu_meltdown) | |||
| 654 | printf(",MELTDOWN"); | |||
| 655 | ||||
| 656 | printf("\n"); | |||
| 657 | ||||
| 658 | replacemeltdown(); | |||
| 659 | x86_print_cacheinfo(ci); | |||
| 660 | ||||
| 661 | /* | |||
| 662 | * "Mitigation G-2" per AMD's Whitepaper "Software Techniques | |||
| 663 | * for Managing Speculation on AMD Processors" | |||
| 664 | * | |||
| 665 | * By setting MSR C001_1029[1]=1, LFENCE becomes a dispatch | |||
| 666 | * serializing instruction. | |||
| 667 | * | |||
| 668 | * This MSR is available on all AMD families >= 10h, except 11h | |||
| 669 | * where LFENCE is always serializing. | |||
| 670 | */ | |||
| 671 | if (!strcmp(cpu_vendor, "AuthenticAMD")) { | |||
| 672 | if (ci->ci_family >= 0x10 && ci->ci_family != 0x11) { | |||
| 673 | uint64_t msr; | |||
| 674 | ||||
| 675 | msr = rdmsr(MSR_DE_CFG0xc0011029); | |||
| 676 | if ((msr & DE_CFG_SERIALIZE_LFENCE(1 << 1)) == 0) { | |||
| 677 | msr |= DE_CFG_SERIALIZE_LFENCE(1 << 1); | |||
| 678 | wrmsr(MSR_DE_CFG0xc0011029, msr); | |||
| 679 | } | |||
| 680 | } | |||
| 681 | } | |||
| 682 | ||||
| 683 | /* | |||
| 684 | * Attempt to disable Silicon Debug and lock the configuration | |||
| 685 | * if it's enabled and unlocked. | |||
| 686 | */ | |||
| 687 | if (!strcmp(cpu_vendor, "GenuineIntel") && | |||
| 688 | (cpu_ecxfeature & CPUIDECX_SDBG0x00000800)) { | |||
| 689 | uint64_t msr; | |||
| 690 | ||||
| 691 | msr = rdmsr(IA32_DEBUG_INTERFACE0xc80); | |||
| 692 | if ((msr & IA32_DEBUG_INTERFACE_ENABLE0x00000001) && | |||
| 693 | (msr & IA32_DEBUG_INTERFACE_LOCK0x40000000) == 0) { | |||
| 694 | msr &= IA32_DEBUG_INTERFACE_MASK0x80000000; | |||
| 695 | msr |= IA32_DEBUG_INTERFACE_LOCK0x40000000; | |||
| 696 | wrmsr(IA32_DEBUG_INTERFACE0xc80, msr); | |||
| 697 | } else if (msr & IA32_DEBUG_INTERFACE_ENABLE0x00000001) | |||
| 698 | printf("%s: cannot disable silicon debug\n", | |||
| 699 | ci->ci_dev->dv_xname); | |||
| 700 | } | |||
| 701 | ||||
| 702 | if (CPU_IS_PRIMARY(ci)((ci)->ci_flags & 0x0008)) { | |||
| 703 | #ifndef SMALL_KERNEL | |||
| 704 | if (!strcmp(cpu_vendor, "AuthenticAMD") && | |||
| 705 | ci->ci_pnfeatset >= 0x80000007) { | |||
| 706 | CPUID(0x80000007, dummy, dummy, dummy, val)__asm volatile("cpuid" : "=a" (dummy), "=b" (dummy), "=c" (dummy ), "=d" (val) : "a" (0x80000007)); | |||
| 707 | ||||
| 708 | if (val & 0x06) { | |||
| 709 | if ((ci->ci_signature & 0xF00) == 0xF00) | |||
| 710 | setperf_setup = k8_powernow_init; | |||
| 711 | } | |||
| 712 | if (ci->ci_family >= 0x10) | |||
| 713 | setperf_setup = k1x_init; | |||
| 714 | } | |||
| 715 | ||||
| 716 | if (cpu_ecxfeature & CPUIDECX_EST0x00000080) | |||
| 717 | setperf_setup = est_init; | |||
| 718 | #endif | |||
| 719 | ||||
| 720 | if (cpu_ecxfeature & CPUIDECX_RDRAND0x40000000) | |||
| 721 | has_rdrand = 1; | |||
| 722 | ||||
| 723 | if (ci->ci_feature_sefflags_ebx & SEFF0EBX_RDSEED0x00040000) | |||
| 724 | has_rdseed = 1; | |||
| 725 | ||||
| 726 | if (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMAP0x00100000) | |||
| 727 | replacesmap(); | |||
| 728 | } | |||
| 729 | ||||
| 730 | if (ci->ci_feature_flags & CPUID_CFLUSH0x00080000) { | |||
| 731 | u_int32_t cflushsz; | |||
| 732 | ||||
| 733 | CPUID(0x01, dummy, cflushsz, dummy, dummy)__asm volatile("cpuid" : "=a" (dummy), "=b" (cflushsz), "=c" ( dummy), "=d" (dummy) : "a" (0x01)); | |||
| 734 | /* cflush cacheline size is equal to bits 15-8 of ebx * 8 */ | |||
| 735 | ci->ci_cflushsz = ((cflushsz >> 8) & 0xff) * 8; | |||
| 736 | } | |||
| 737 | ||||
| 738 | #ifndef SMALL_KERNEL | |||
| 739 | if (CPU_IS_PRIMARY(ci)((ci)->ci_flags & 0x0008) && (ci->ci_feature_tpmflags & TPM_SENSOR0x00000001)) { | |||
| 740 | strlcpy(ci->ci_sensordev.xname, ci->ci_dev->dv_xname, | |||
| 741 | sizeof(ci->ci_sensordev.xname)); | |||
| 742 | ci->ci_sensor.type = SENSOR_TEMP; | |||
| 743 | sensor_task_register(ci, intelcore_update_sensor, 5); | |||
| 744 | sensor_attach(&ci->ci_sensordev, &ci->ci_sensor); | |||
| 745 | sensordev_install(&ci->ci_sensordev); | |||
| 746 | } | |||
| 747 | #endif | |||
| 748 | ||||
| 749 | #ifdef CRYPTO1 | |||
| 750 | if (CPU_IS_PRIMARY(ci)((ci)->ci_flags & 0x0008)) { | |||
| 751 | if (cpu_ecxfeature & CPUIDECX_PCLMUL0x00000002) | |||
| 752 | amd64_has_pclmul = 1; | |||
| 753 | ||||
| 754 | if (cpu_ecxfeature & CPUIDECX_AES0x02000000) | |||
| 755 | amd64_has_aesni = 1; | |||
| 756 | } | |||
| 757 | #endif | |||
| 758 | ||||
| 759 | if (!strcmp(cpu_vendor, "AuthenticAMD")) | |||
| 760 | amd64_errata(ci); | |||
| 761 | ||||
| 762 | if (CPU_IS_PRIMARY(ci)((ci)->ci_flags & 0x0008) && !strcmp(cpu_vendor, "CentaurHauls")) { | |||
| 763 | ci->cpu_setup = via_nano_setup; | |||
| 764 | #ifndef SMALL_KERNEL | |||
| 765 | strlcpy(ci->ci_sensordev.xname, ci->ci_dev->dv_xname, | |||
| 766 | sizeof(ci->ci_sensordev.xname)); | |||
| 767 | ci->ci_sensor.type = SENSOR_TEMP; | |||
| 768 | sensor_task_register(ci, via_update_sensor, 5); | |||
| 769 | sensor_attach(&ci->ci_sensordev, &ci->ci_sensor); | |||
| 770 | sensordev_install(&ci->ci_sensordev); | |||
| 771 | #endif | |||
| 772 | } | |||
| 773 | ||||
| 774 | tsc_timecounter_init(ci, freq); | |||
| 775 | ||||
| 776 | cpu_topology(ci); | |||
| 777 | #if NVMM1 > 0 | |||
| 778 | cpu_check_vmm_cap(ci); | |||
| 779 | #endif /* NVMM > 0 */ | |||
| 780 | } | |||
| 781 | ||||
| 782 | #ifndef SMALL_KERNEL | |||
| 783 | /* | |||
| 784 | * Base 2 logarithm of an int. returns 0 for 0 (yeye, I know). | |||
| 785 | */ | |||
| 786 | static int | |||
| 787 | log2(unsigned int i) | |||
| 788 | { | |||
| 789 | int ret = 0; | |||
| 790 | ||||
| 791 | while (i >>= 1) | |||
| 792 | ret++; | |||
| 793 | ||||
| 794 | return (ret); | |||
| 795 | } | |||
| 796 | ||||
| 797 | static int | |||
| 798 | mask_width(u_int x) | |||
| 799 | { | |||
| 800 | int bit; | |||
| 801 | int mask; | |||
| 802 | int powerof2; | |||
| 803 | ||||
| 804 | powerof2 = ((x - 1) & x) == 0; | |||
| 805 | mask = (x << (1 - powerof2)) - 1; | |||
| 806 | ||||
| 807 | /* fls */ | |||
| 808 | if (mask == 0) | |||
| 809 | return (0); | |||
| 810 | for (bit = 1; mask != 1; bit++) | |||
| 811 | mask = (unsigned int)mask >> 1; | |||
| 812 | ||||
| 813 | return (bit); | |||
| 814 | } | |||
| 815 | #endif | |||
| 816 | ||||
| 817 | /* | |||
| 818 | * Build up cpu topology for given cpu, must run on the core itself. | |||
| 819 | */ | |||
| 820 | void | |||
| 821 | cpu_topology(struct cpu_info *ci) | |||
| 822 | { | |||
| 823 | #ifndef SMALL_KERNEL | |||
| 824 | u_int32_t eax, ebx, ecx, edx; | |||
| 825 | u_int32_t apicid, max_apicid = 0, max_coreid = 0; | |||
| 826 | u_int32_t smt_bits = 0, core_bits, pkg_bits = 0; | |||
| 827 | u_int32_t smt_mask = 0, core_mask, pkg_mask = 0; | |||
| 828 | ||||
| 829 | /* We need at least apicid at CPUID 1 */ | |||
| 830 | if (cpuid_level < 1) | |||
| 831 | goto no_topology; | |||
| 832 | ||||
| 833 | /* Initial apicid */ | |||
| 834 | CPUID(1, eax, ebx, ecx, edx)__asm volatile("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (1)); | |||
| 835 | apicid = (ebx >> 24) & 0xff; | |||
| 836 | ||||
| 837 | if (strcmp(cpu_vendor, "AuthenticAMD") == 0) { | |||
| 838 | uint32_t nthreads = 1; /* per core */ | |||
| 839 | uint32_t thread_id; /* within a package */ | |||
| 840 | ||||
| 841 | /* We need at least apicid at CPUID 0x80000008 */ | |||
| 842 | if (ci->ci_pnfeatset < 0x80000008) | |||
| 843 | goto no_topology; | |||
| 844 | ||||
| 845 | CPUID(0x80000008, eax, ebx, ecx, edx)__asm volatile("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000008)); | |||
| 846 | core_bits = (ecx >> 12) & 0xf; | |||
| 847 | ||||
| 848 | if (ci->ci_pnfeatset >= 0x8000001e) { | |||
| 849 | CPUID(0x8000001e, eax, ebx, ecx, edx)__asm volatile("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x8000001e)); | |||
| 850 | nthreads = ((ebx >> 8) & 0xf) + 1; | |||
| 851 | } | |||
| 852 | ||||
| 853 | /* Shift the core_bits off to get at the pkg bits */ | |||
| 854 | ci->ci_pkg_id = apicid >> core_bits; | |||
| 855 | ||||
| 856 | /* Get rid of the package bits */ | |||
| 857 | core_mask = (1 << core_bits) - 1; | |||
| 858 | thread_id = apicid & core_mask; | |||
| 859 | ||||
| 860 | /* Cut logical thread_id into core id, and smt id in a core */ | |||
| 861 | ci->ci_core_id = thread_id / nthreads; | |||
| 862 | ci->ci_smt_id = thread_id % nthreads; | |||
| 863 | } else if (strcmp(cpu_vendor, "GenuineIntel") == 0) { | |||
| 864 | /* We only support leaf 1/4 detection */ | |||
| 865 | if (cpuid_level < 4) | |||
| 866 | goto no_topology; | |||
| 867 | /* Get max_apicid */ | |||
| 868 | CPUID(1, eax, ebx, ecx, edx)__asm volatile("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (1)); | |||
| 869 | max_apicid = (ebx >> 16) & 0xff; | |||
| 870 | /* Get max_coreid */ | |||
| 871 | CPUID_LEAF(4, 0, eax, ebx, ecx, edx)__asm volatile("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (4), "c" (0)); | |||
| 872 | max_coreid = ((eax >> 26) & 0x3f) + 1; | |||
| 873 | /* SMT */ | |||
| 874 | smt_bits = mask_width(max_apicid / max_coreid); | |||
| 875 | smt_mask = (1 << smt_bits) - 1; | |||
| 876 | /* Core */ | |||
| 877 | core_bits = log2(max_coreid); | |||
| 878 | core_mask = (1 << (core_bits + smt_bits)) - 1; | |||
| 879 | core_mask ^= smt_mask; | |||
| 880 | /* Pkg */ | |||
| 881 | pkg_bits = core_bits + smt_bits; | |||
| 882 | pkg_mask = -1 << core_bits; | |||
| 883 | ||||
| 884 | ci->ci_smt_id = apicid & smt_mask; | |||
| 885 | ci->ci_core_id = (apicid & core_mask) >> smt_bits; | |||
| 886 | ci->ci_pkg_id = (apicid & pkg_mask) >> pkg_bits; | |||
| 887 | } else | |||
| 888 | goto no_topology; | |||
| 889 | #ifdef DEBUG | |||
| 890 | printf("cpu%d: smt %u, core %u, pkg %u " | |||
| 891 | "(apicid 0x%x, max_apicid 0x%x, max_coreid 0x%x, smt_bits 0x%x, smt_mask 0x%x, " | |||
| 892 | "core_bits 0x%x, core_mask 0x%x, pkg_bits 0x%x, pkg_mask 0x%x)\n", | |||
| 893 | ci->ci_cpuid, ci->ci_smt_id, ci->ci_core_id, ci->ci_pkg_id, | |||
| 894 | apicid, max_apicid, max_coreid, smt_bits, smt_mask, core_bits, | |||
| 895 | core_mask, pkg_bits, pkg_mask); | |||
| 896 | #else | |||
| 897 | printf("cpu%d: smt %u, core %u, package %u\n", ci->ci_cpuid, | |||
| 898 | ci->ci_smt_id, ci->ci_core_id, ci->ci_pkg_id); | |||
| 899 | ||||
| 900 | #endif | |||
| 901 | return; | |||
| 902 | /* We can't map, so consider ci_core_id as ci_cpuid */ | |||
| 903 | no_topology: | |||
| 904 | #endif | |||
| 905 | ci->ci_smt_id = 0; | |||
| 906 | ci->ci_core_id = ci->ci_cpuid; | |||
| 907 | ci->ci_pkg_id = 0; | |||
| 908 | } | |||
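To illustrate the Intel leaf 1/4 branch of cpu_topology() above, the stand-alone sketch below decomposes one APIC ID using made-up CPUID values (8 logical CPUs and 4 cores per package, i.e. 2-way SMT). It assumes power-of-two counts, where mask_width() reduces to log2(), and it builds pkg_mask from an unsigned operand to sidestep the shift flagged at line 882.

/* Sketch of the Intel APIC ID decomposition in cpu_topology() above. */
#include <stdio.h>

static int
log2_uint(unsigned int i)	/* same body as the file's log2() helper */
{
	int ret = 0;

	while (i >>= 1)
		ret++;
	return ret;
}

int
main(void)
{
	unsigned int apicid = 0x5;	/* hypothetical initial APIC ID */
	unsigned int max_apicid = 8;	/* hypothetical CPUID.1 EBX[23:16] */
	unsigned int max_coreid = 4;	/* hypothetical CPUID.4 EAX[31:26]+1 */
	unsigned int smt_bits, smt_mask, core_bits, core_mask;
	unsigned int pkg_bits, pkg_mask;

	smt_bits = log2_uint(max_apicid / max_coreid);		/* 1 */
	smt_mask = (1U << smt_bits) - 1;			/* 0x1 */
	core_bits = log2_uint(max_coreid);			/* 2 */
	core_mask = ((1U << (core_bits + smt_bits)) - 1) ^ smt_mask; /* 0x6 */
	pkg_bits = core_bits + smt_bits;			/* 3 */

	/*
	 * The kernel computes "-1 << core_bits" here; an unsigned operand
	 * avoids the undefined shift, and any low bits kept below pkg_bits
	 * are discarded by the >> pkg_bits below anyway.
	 */
	pkg_mask = ~0U << pkg_bits;

	printf("smt %u, core %u, package %u\n",
	    apicid & smt_mask,			/* 1 */
	    (apicid & core_mask) >> smt_bits,	/* 2 */
	    (apicid & pkg_mask) >> pkg_bits);	/* 0 */
	return 0;
}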
| 909 | ||||
| 910 | #if NVMM1 > 0 | |||
| 911 | /* | |||
| 912 | * cpu_check_vmm_cap | |||
| 913 | * | |||
| 914 | * Checks for VMM capabilities for 'ci'. Initializes certain per-cpu VMM | |||
| 915 | * state in 'ci' if virtualization extensions are found. | |||
| 916 | * | |||
| 917 | * Parameters: | |||
| 918 | * ci: the cpu being checked | |||
| 919 | */ | |||
| 920 | void | |||
| 921 | cpu_check_vmm_cap(struct cpu_info *ci) | |||
| 922 | { | |||
| 923 | uint64_t msr; | |||
| 924 | uint32_t cap, dummy, edx; | |||
| 925 | ||||
| 926 | /* | |||
| 927 | * Check for workable VMX | |||
| 928 | */ | |||
| 929 | if (cpu_ecxfeature & CPUIDECX_VMX0x00000020) { | |||
| 930 | msr = rdmsr(MSR_IA32_FEATURE_CONTROL0x03a); | |||
| 931 | ||||
| 932 | if (!(msr & IA32_FEATURE_CONTROL_LOCK0x01)) | |||
| 933 | ci->ci_vmm_flags |= CI_VMM_VMX(1 << 0); | |||
| 934 | else { | |||
| 935 | if (msr & IA32_FEATURE_CONTROL_VMX_EN0x04) | |||
| 936 | ci->ci_vmm_flags |= CI_VMM_VMX(1 << 0); | |||
| 937 | else | |||
| 938 | ci->ci_vmm_flags |= CI_VMM_DIS(1 << 4); | |||
| 939 | } | |||
| 940 | } | |||
| 941 | ||||
| 942 | /* | |||
| 943 | * Check for EPT (Intel Nested Paging) and other secondary | |||
| 944 | * controls | |||
| 945 | */ | |||
| 946 | if (ci->ci_vmm_flags & CI_VMM_VMX(1 << 0)) { | |||
| 947 | /* Secondary controls available? */ | |||
| 948 | /* XXX should we check true procbased ctls here if avail? */ | |||
| 949 | msr = rdmsr(IA32_VMX_PROCBASED_CTLS0x482); | |||
| 950 | if (msr & (IA32_VMX_ACTIVATE_SECONDARY_CONTROLS(1ULL << 31)) << 32) { | |||
| 951 | msr = rdmsr(IA32_VMX_PROCBASED2_CTLS0x48B); | |||
| 952 | /* EPT available? */ | |||
| 953 | if (msr & (IA32_VMX_ENABLE_EPT(1ULL << 1)) << 32) | |||
| 954 | ci->ci_vmm_flags |= CI_VMM_EPT(1 << 3); | |||
| 955 | /* VM Functions available? */ | |||
| 956 | if (msr & (IA32_VMX_ENABLE_VM_FUNCTIONS(1ULL << 13)) << 32) { | |||
| 957 | ci->ci_vmm_cap.vcc_vmx.vmx_vm_func = | |||
| 958 | rdmsr(IA32_VMX_VMFUNC0x491); | |||
| 959 | } | |||
| 960 | } | |||
| 961 | } | |||
| 962 | ||||
| 963 | /* | |||
| 964 | * Check startup config (VMX) | |||
| 965 | */ | |||
| 966 | if (ci->ci_vmm_flags & CI_VMM_VMX(1 << 0)) { | |||
| 967 | /* CR0 fixed and flexible bits */ | |||
| 968 | msr = rdmsr(IA32_VMX_CR0_FIXED00x486); | |||
| 969 | ci->ci_vmm_cap.vcc_vmx.vmx_cr0_fixed0 = msr; | |||
| 970 | msr = rdmsr(IA32_VMX_CR0_FIXED10x487); | |||
| 971 | ci->ci_vmm_cap.vcc_vmx.vmx_cr0_fixed1 = msr; | |||
| 972 | ||||
| 973 | /* CR4 fixed and flexible bits */ | |||
| 974 | msr = rdmsr(IA32_VMX_CR4_FIXED00x488); | |||
| 975 | ci->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed0 = msr; | |||
| 976 | msr = rdmsr(IA32_VMX_CR4_FIXED10x489); | |||
| 977 | ci->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed1 = msr; | |||
| 978 | ||||
| 979 | /* VMXON region revision ID (bits 30:0 of IA32_VMX_BASIC) */ | |||
| 980 | msr = rdmsr(IA32_VMX_BASIC0x480); | |||
| 981 | ci->ci_vmm_cap.vcc_vmx.vmx_vmxon_revision = | |||
| 982 | (uint32_t)(msr & 0x7FFFFFFF); | |||
| 983 | ||||
| 984 | /* MSR save / load table size */ | |||
| 985 | msr = rdmsr(IA32_VMX_MISC0x485); | |||
| 986 | ci->ci_vmm_cap.vcc_vmx.vmx_msr_table_size = | |||
| 987 | (uint32_t)(msr & IA32_VMX_MSR_LIST_SIZE_MASK(7ULL << 25)) >> 25; | |||
| 988 | ||||
| 989 | /* CR3 target count size */ | |||
| 990 | ci->ci_vmm_cap.vcc_vmx.vmx_cr3_tgt_count = | |||
| 991 | (uint32_t)(msr & IA32_VMX_CR3_TGT_SIZE_MASK(0x1FFULL << 16)) >> 16; | |||
| 992 | } | |||
| 993 | ||||
| 994 | /* | |||
| 995 | * Check for workable SVM | |||
| 996 | */ | |||
| 997 | if (ecpu_ecxfeature & CPUIDECX_SVM0x00000004) { | |||
| 998 | msr = rdmsr(MSR_AMD_VM_CR0xc0010114); | |||
| 999 | ||||
| 1000 | if (!(msr & AMD_SVMDIS0x10)) | |||
| 1001 | ci->ci_vmm_flags |= CI_VMM_SVM(1 << 1); | |||
| 1002 | ||||
| 1003 | CPUID(CPUID_AMD_SVM_CAP, dummy,__asm volatile("cpuid" : "=a" (dummy), "=b" (ci->ci_vmm_cap .vcc_svm.svm_max_asid), "=c" (dummy), "=d" (edx) : "a" (0x8000000A )) | |||
| 1004 | ci->ci_vmm_cap.vcc_svm.svm_max_asid, dummy, edx)__asm volatile("cpuid" : "=a" (dummy), "=b" (ci->ci_vmm_cap .vcc_svm.svm_max_asid), "=c" (dummy), "=d" (edx) : "a" (0x8000000A )); | |||
| 1005 | ||||
| 1006 | if (ci->ci_vmm_cap.vcc_svm.svm_max_asid > 0xFFF) | |||
| 1007 | ci->ci_vmm_cap.vcc_svm.svm_max_asid = 0xFFF; | |||
| 1008 | ||||
| 1009 | if (edx & AMD_SVM_FLUSH_BY_ASID_CAP(1 << 6)) | |||
| 1010 | ci->ci_vmm_cap.vcc_svm.svm_flush_by_asid = 1; | |||
| 1011 | ||||
| 1012 | if (edx & AMD_SVM_VMCB_CLEAN_CAP(1 << 5)) | |||
| 1013 | ci->ci_vmm_cap.vcc_svm.svm_vmcb_clean = 1; | |||
| 1014 | } | |||
| 1015 | ||||
| 1016 | /* | |||
| 1017 | * Check for SVM Nested Paging | |||
| 1018 | */ | |||
| 1019 | if ((ci->ci_vmm_flags & CI_VMM_SVM(1 << 1)) && | |||
| 1020 | ci->ci_pnfeatset >= CPUID_AMD_SVM_CAP0x8000000A) { | |||
| 1021 | CPUID(CPUID_AMD_SVM_CAP, dummy, dummy, dummy, cap)__asm volatile("cpuid" : "=a" (dummy), "=b" (dummy), "=c" (dummy ), "=d" (cap) : "a" (0x8000000A)); | |||
| 1022 | if (cap & AMD_SVM_NESTED_PAGING_CAP(1 << 0)) | |||
| 1023 | ci->ci_vmm_flags |= CI_VMM_RVI(1 << 2); | |||
| 1024 | } | |||
| 1025 | ||||
| 1026 | /* | |||
| 1027 | * Check "L1 flush on VM entry" (Intel L1TF vuln) semantics | |||
| 1028 | * Full details can be found here: | |||
| 1029 | * https://software.intel.com/security-software-guidance/insights/deep-dive-intel-analysis-l1-terminal-fault | |||
| 1030 | */ | |||
| 1031 | if (!strcmp(cpu_vendor, "GenuineIntel")) { | |||
| 1032 | if (ci->ci_feature_sefflags_edx & SEFF0EDX_L1DF0x10000000) | |||
| 1033 | ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr = 1; | |||
| 1034 | else | |||
| 1035 | ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr = 0; | |||
| 1036 | ||||
| 1037 | /* | |||
| 1038 | * Certain CPUs may have the vulnerability remedied in | |||
| 1039 | * hardware (RDCL_NO), or we may be nested in an VMM that | |||
| 1040 | * is doing flushes (SKIP_L1DFL_VMENTRY) using the MSR. | |||
| 1041 | * In either case no mitigation at all is necessary. | |||
| 1042 | */ | |||
| 1043 | if (ci->ci_feature_sefflags_edx & SEFF0EDX_ARCH_CAP0x20000000) { | |||
| 1044 | msr = rdmsr(MSR_ARCH_CAPABILITIES0x10a); | |||
| 1045 | if ((msr & ARCH_CAPABILITIES_RDCL_NO(1 << 0)) || | |||
| 1046 | ((msr & ARCH_CAPABILITIES_SKIP_L1DFL_VMENTRY(1 << 3)) && | |||
| 1047 | ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr)) | |||
| 1048 | ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr = | |||
| 1049 | VMX_SKIP_L1D_FLUSH2; | |||
| 1050 | } | |||
| 1051 | } | |||
| 1052 | } | |||
| 1053 | #endif /* NVMM > 0 */ |