File: arch/amd64/amd64/vmm_machdep.c
Warning: line 4076, column 4: Value stored to 'irq' is never read
1 | /* $OpenBSD: vmm_machdep.c,v 1.14 2024/01/10 04:13:59 dv Exp $ */ |
2 | /* |
3 | * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org> |
4 | * |
5 | * Permission to use, copy, modify, and distribute this software for any |
6 | * purpose with or without fee is hereby granted, provided that the above |
7 | * copyright notice and this permission notice appear in all copies. |
8 | * |
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
16 | */ |
17 | |
18 | #include <sys/param.h> |
19 | #include <sys/systm.h> |
20 | #include <sys/signalvar.h> |
21 | #include <sys/malloc.h> |
22 | #include <sys/device.h> |
23 | #include <sys/pool.h> |
24 | #include <sys/proc.h> |
25 | #include <sys/user.h> |
26 | #include <sys/ioctl.h> |
27 | #include <sys/queue.h> |
28 | #include <sys/refcnt.h> |
29 | #include <sys/rwlock.h> |
30 | #include <sys/pledge.h> |
31 | #include <sys/memrange.h> |
32 | #include <sys/tracepoint.h> |
33 | |
34 | #include <uvm/uvm_extern.h> |
35 | |
36 | #include <machine/fpu.h> |
37 | #include <machine/pmap.h> |
38 | #include <machine/biosvar.h> |
39 | #include <machine/segments.h> |
40 | #include <machine/cpufunc.h> |
41 | #include <machine/vmmvar.h> |
42 | |
43 | #include <dev/isa/isareg.h> |
44 | #include <dev/pv/pvreg.h> |
45 | |
46 | #include <dev/vmm/vmm.h> |
47 | |
48 | #ifdef MP_LOCKDEBUG |
49 | #include <ddb/db_output.h> |
50 | extern int __mp_lock_spinout; |
51 | #endif /* MP_LOCKDEBUG */ |
52 | |
53 | void *l1tf_flush_region; |
54 | |
55 | #define DEVNAME(s)	((s)->sc_dev.dv_xname) |
56 | |
57 | #define CTRL_DUMP(x,y,z) printf("     %s: Can set:%s Can clear:%s\n", #z , \ |
58 | vcpu_vmx_check_cap(x, IA32_VMX_##y ##_CTLS, \ |
59 | IA32_VMX_##z, 1) ? "Yes" : "No", \ |
60 | vcpu_vmx_check_cap(x, IA32_VMX_##y ##_CTLS, \ |
61 | IA32_VMX_##z, 0) ? "Yes" : "No"); |
62 | |
63 | #define VMX_EXIT_INFO_HAVE_RIP		0x1 |
64 | #define VMX_EXIT_INFO_HAVE_REASON	0x2 |
65 | #define VMX_EXIT_INFO_COMPLETE \ |
66 | (VMX_EXIT_INFO_HAVE_RIP | VMX_EXIT_INFO_HAVE_REASON) |
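 | /* |
 |  * Annotation (not part of the original source): CTRL_DUMP relies on |
 |  * preprocessor token pasting, so an illustrative call such as |
 |  * |
 |  *	CTRL_DUMP(vcpu, PINBASED, EXTERNAL_INT_EXITING); |
 |  * |
 |  * would expand to roughly |
 |  * |
 |  *	printf("     %s: Can set:%s Can clear:%s\n", "EXTERNAL_INT_EXITING", |
 |  *	    vcpu_vmx_check_cap(vcpu, IA32_VMX_PINBASED_CTLS, |
 |  *	    IA32_VMX_EXTERNAL_INT_EXITING, 1) ? "Yes" : "No", |
 |  *	    vcpu_vmx_check_cap(vcpu, IA32_VMX_PINBASED_CTLS, |
 |  *	    IA32_VMX_EXTERNAL_INT_EXITING, 0) ? "Yes" : "No"); |
 |  */ |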
67 | |
68 | void vmx_dump_vmcs_field(uint16_t, const char *); |
69 | int vmm_enabled(void); |
70 | void vmm_activate_machdep(struct device *, int); |
71 | int vmmioctl_machdep(dev_t, u_long, caddr_t, int, struct proc *); |
72 | int vmm_quiesce_vmx(void); |
73 | int vm_run(struct vm_run_params *); |
74 | int vm_intr_pending(struct vm_intr_params *); |
75 | int vm_rwregs(struct vm_rwregs_params *, int); |
76 | int vm_mprotect_ept(struct vm_mprotect_ept_params *); |
77 | int vm_rwvmparams(struct vm_rwvmparams_params *, int); |
78 | int vcpu_readregs_vmx(struct vcpu *, uint64_t, int, struct vcpu_reg_state *); |
79 | int vcpu_readregs_svm(struct vcpu *, uint64_t, struct vcpu_reg_state *); |
80 | int vcpu_writeregs_vmx(struct vcpu *, uint64_t, int, struct vcpu_reg_state *); |
81 | int vcpu_writeregs_svm(struct vcpu *, uint64_t, struct vcpu_reg_state *); |
82 | int vcpu_reset_regs(struct vcpu *, struct vcpu_reg_state *); |
83 | int vcpu_reset_regs_vmx(struct vcpu *, struct vcpu_reg_state *); |
84 | int vcpu_reset_regs_svm(struct vcpu *, struct vcpu_reg_state *); |
85 | int vcpu_reload_vmcs_vmx(struct vcpu *); |
86 | int vcpu_init(struct vcpu *); |
87 | int vcpu_init_vmx(struct vcpu *); |
88 | int vcpu_init_svm(struct vcpu *); |
89 | int vcpu_run_vmx(struct vcpu *, struct vm_run_params *); |
90 | int vcpu_run_svm(struct vcpu *, struct vm_run_params *); |
91 | void vcpu_deinit(struct vcpu *); |
92 | void vcpu_deinit_svm(struct vcpu *); |
93 | void vcpu_deinit_vmx(struct vcpu *); |
94 | int vm_impl_init(struct vm *, struct proc *); |
95 | int vm_impl_init_vmx(struct vm *, struct proc *); |
96 | int vm_impl_init_svm(struct vm *, struct proc *); |
97 | void vm_impl_deinit(struct vm *); |
98 | int vcpu_vmx_check_cap(struct vcpu *, uint32_t, uint32_t, int); |
99 | int vcpu_vmx_compute_ctrl(uint64_t, uint16_t, uint32_t, uint32_t, uint32_t *); |
100 | int vmx_get_exit_info(uint64_t *, uint64_t *); |
101 | int vmx_load_pdptes(struct vcpu *); |
102 | int vmx_handle_exit(struct vcpu *); |
103 | int svm_handle_exit(struct vcpu *); |
104 | int svm_handle_msr(struct vcpu *); |
105 | int vmm_handle_xsetbv(struct vcpu *, uint64_t *); |
106 | int vmx_handle_xsetbv(struct vcpu *); |
107 | int svm_handle_xsetbv(struct vcpu *); |
108 | int vmm_handle_cpuid(struct vcpu *); |
109 | int vmx_handle_rdmsr(struct vcpu *); |
110 | int vmx_handle_wrmsr(struct vcpu *); |
111 | int vmx_handle_cr0_write(struct vcpu *, uint64_t); |
112 | int vmx_handle_cr4_write(struct vcpu *, uint64_t); |
113 | int vmx_handle_cr(struct vcpu *); |
114 | int svm_handle_inout(struct vcpu *); |
115 | int vmx_handle_inout(struct vcpu *); |
116 | int svm_handle_hlt(struct vcpu *); |
117 | int vmx_handle_hlt(struct vcpu *); |
118 | int vmm_inject_ud(struct vcpu *); |
119 | int vmm_inject_gp(struct vcpu *); |
120 | int vmm_inject_db(struct vcpu *); |
121 | void vmx_handle_intr(struct vcpu *); |
122 | void vmx_handle_intwin(struct vcpu *); |
123 | void vmx_handle_misc_enable_msr(struct vcpu *); |
124 | int vmm_get_guest_memtype(struct vm *, paddr_t); |
125 | int vmx_get_guest_faulttype(void); |
126 | int svm_get_guest_faulttype(struct vmcb *); |
127 | int vmx_get_exit_qualification(uint64_t *); |
128 | int vmm_get_guest_cpu_cpl(struct vcpu *); |
129 | int vmm_get_guest_cpu_mode(struct vcpu *); |
130 | int svm_fault_page(struct vcpu *, paddr_t); |
131 | int vmx_fault_page(struct vcpu *, paddr_t); |
132 | int vmx_handle_np_fault(struct vcpu *); |
133 | int svm_handle_np_fault(struct vcpu *); |
134 | int vmx_mprotect_ept(vm_map_t, paddr_t, paddr_t, int); |
135 | pt_entry_t *vmx_pmap_find_pte_ept(pmap_t, paddr_t); |
136 | int vmm_alloc_vpid(uint16_t *); |
137 | void vmm_free_vpid(uint16_t); |
138 | const char *vcpu_state_decode(u_int); |
139 | const char *vmx_exit_reason_decode(uint32_t); |
140 | const char *svm_exit_reason_decode(uint32_t); |
141 | const char *vmx_instruction_error_decode(uint32_t); |
142 | void svm_setmsrbr(struct vcpu *, uint32_t); |
143 | void svm_setmsrbw(struct vcpu *, uint32_t); |
144 | void svm_setmsrbrw(struct vcpu *, uint32_t); |
145 | void vmx_setmsrbr(struct vcpu *, uint32_t); |
146 | void vmx_setmsrbw(struct vcpu *, uint32_t); |
147 | void vmx_setmsrbrw(struct vcpu *, uint32_t); |
148 | void svm_set_clean(struct vcpu *, uint32_t); |
149 | void svm_set_dirty(struct vcpu *, uint32_t); |
150 | |
151 | int vmm_gpa_is_valid(struct vcpu *vcpu, paddr_t gpa, size_t obj_size); |
152 | void vmm_init_pvclock(struct vcpu *, paddr_t); |
153 | int vmm_update_pvclock(struct vcpu *); |
154 | int vmm_pat_is_valid(uint64_t); |
155 | |
156 | #ifdef MULTIPROCESSOR |
157 | static int vmx_remote_vmclear(struct cpu_info*, struct vcpu *); |
158 | #endif |
159 | |
160 | #ifdef VMM_DEBUG |
161 | void dump_vcpu(struct vcpu *); |
162 | void vmx_vcpu_dump_regs(struct vcpu *); |
163 | void vmx_dump_vmcs(struct vcpu *); |
164 | const char *msr_name_decode(uint32_t); |
165 | void vmm_segment_desc_decode(uint64_t); |
166 | void vmm_decode_cr0(uint64_t); |
167 | void vmm_decode_cr3(uint64_t); |
168 | void vmm_decode_cr4(uint64_t); |
169 | void vmm_decode_msr_value(uint64_t, uint64_t); |
170 | void vmm_decode_apicbase_msr_value(uint64_t); |
171 | void vmm_decode_ia32_fc_value(uint64_t); |
172 | void vmm_decode_mtrrcap_value(uint64_t); |
173 | void vmm_decode_perf_status_value(uint64_t); |
174 | void vmm_decode_perf_ctl_value(uint64_t); |
175 | void vmm_decode_mtrrdeftype_value(uint64_t); |
176 | void vmm_decode_efer_value(uint64_t); |
177 | void vmm_decode_rflags(uint64_t); |
178 | void vmm_decode_misc_enable_value(uint64_t); |
179 | const char *vmm_decode_cpu_mode(struct vcpu *); |
180 | |
181 | extern int mtrr2mrt(int); |
182 | |
183 | struct vmm_reg_debug_info { |
184 | uint64_t vrdi_bit; |
185 | const char *vrdi_present; |
186 | const char *vrdi_absent; |
187 | }; |
188 | #endif /* VMM_DEBUG */ |
189 | |
190 | extern uint64_t tsc_frequency; |
191 | extern int tsc_is_invariant; |
192 | |
193 | const char *vmm_hv_signature = VMM_HV_SIGNATURE; |
194 | |
195 | const struct kmem_pa_mode vmm_kp_contig = { |
196 | .kp_constraint = &no_constraint, |
197 | .kp_maxseg = 1, |
198 | .kp_align = 4096, |
199 | .kp_zero = 1, |
200 | }; |
201 | |
202 | extern struct cfdriver vmm_cd; |
203 | extern const struct cfattach vmm_ca; |
204 | |
205 | /* |
206 | * Helper struct to easily get the VMCS field IDs needed in vmread/vmwrite |
207 | * to access the individual fields of the guest segment registers. This |
208 | * struct is indexed by VCPU_REGS_* id. |
209 | */ |
210 | const struct { |
211 | uint64_t selid; |
212 | uint64_t limitid; |
213 | uint64_t arid; |
214 | uint64_t baseid; |
215 | } vmm_vmx_sreg_vmcs_fields[] = { |
216 | { VMCS_GUEST_IA32_ES_SEL, VMCS_GUEST_IA32_ES_LIMIT, |
217 | VMCS_GUEST_IA32_ES_AR, VMCS_GUEST_IA32_ES_BASE }, |
218 | { VMCS_GUEST_IA32_CS_SEL, VMCS_GUEST_IA32_CS_LIMIT, |
219 | VMCS_GUEST_IA32_CS_AR, VMCS_GUEST_IA32_CS_BASE }, |
220 | { VMCS_GUEST_IA32_SS_SEL, VMCS_GUEST_IA32_SS_LIMIT, |
221 | VMCS_GUEST_IA32_SS_AR, VMCS_GUEST_IA32_SS_BASE }, |
222 | { VMCS_GUEST_IA32_DS_SEL, VMCS_GUEST_IA32_DS_LIMIT, |
223 | VMCS_GUEST_IA32_DS_AR, VMCS_GUEST_IA32_DS_BASE }, |
224 | { VMCS_GUEST_IA32_FS_SEL, VMCS_GUEST_IA32_FS_LIMIT, |
225 | VMCS_GUEST_IA32_FS_AR, VMCS_GUEST_IA32_FS_BASE }, |
226 | { VMCS_GUEST_IA32_GS_SEL, VMCS_GUEST_IA32_GS_LIMIT, |
227 | VMCS_GUEST_IA32_GS_AR, VMCS_GUEST_IA32_GS_BASE }, |
228 | { VMCS_GUEST_IA32_LDTR_SEL, VMCS_GUEST_IA32_LDTR_LIMIT, |
229 | VMCS_GUEST_IA32_LDTR_AR, VMCS_GUEST_IA32_LDTR_BASE }, |
230 | { VMCS_GUEST_IA32_TR_SEL, VMCS_GUEST_IA32_TR_LIMIT, |
231 | VMCS_GUEST_IA32_TR_AR, VMCS_GUEST_IA32_TR_BASE } |
232 | }; |
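 | /* |
 |  * Annotation (not part of the original source): the table above is |
 |  * indexed by the VCPU_REGS_* segment id, so reading one guest segment |
 |  * register is roughly (sketch): |
 |  * |
 |  *	uint64_t sel, limit, ar, base; |
 |  *	int i = VCPU_REGS_CS; |
 |  *	if (vmread(vmm_vmx_sreg_vmcs_fields[i].selid, &sel) || |
 |  *	    vmread(vmm_vmx_sreg_vmcs_fields[i].limitid, &limit) || |
 |  *	    vmread(vmm_vmx_sreg_vmcs_fields[i].arid, &ar) || |
 |  *	    vmread(vmm_vmx_sreg_vmcs_fields[i].baseid, &base)) |
 |  *		return (EINVAL); |
 |  * |
 |  * vcpu_readregs_vmx() below walks the whole array this way. |
 |  */ |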
233 | |
234 | /* Pools for VMs and VCPUs */ |
235 | extern struct pool vm_pool; |
236 | extern struct pool vcpu_pool; |
237 | |
238 | extern struct vmm_softc *vmm_softc; |
239 | |
240 | /* IDT information used when populating host state area */ |
241 | extern vaddr_t idt_vaddr; |
242 | extern struct gate_descriptor *idt; |
243 | |
244 | /* Constants used in "CR access exit" */ |
245 | #define CR_WRITE	0 |
246 | #define CR_READ		1 |
247 | #define CR_CLTS		2 |
248 | #define CR_LMSW		3 |
249 | |
250 | /* |
251 | * vmm_enabled |
252 | * |
253 | * Checks if we have at least one CPU with either VMX or SVM. |
254 | * Returns 1 if we have at least one of either type, but not both, 0 otherwise. |
255 | */ |
256 | int |
257 | vmm_enabled(void) |
258 | { |
259 | struct cpu_info *ci; |
260 | CPU_INFO_ITERATOR cii; |
261 | int found_vmx = 0, found_svm = 0; |
262 | |
263 | /* Check if we have at least one CPU with either VMX or SVM */ |
264 | CPU_INFO_FOREACH(cii, ci) { |
265 | if (ci->ci_vmm_flags & CI_VMM_VMX) |
266 | found_vmx = 1; |
267 | if (ci->ci_vmm_flags & CI_VMM_SVM) |
268 | found_svm = 1; |
269 | } |
270 | |
271 | /* Don't support both SVM and VMX at the same time */ |
272 | if (found_vmx && found_svm) |
273 | return (0); |
274 | |
275 | if (found_vmx || found_svm) |
276 | return 1; |
277 | |
278 | return 0; |
279 | } |
280 | |
281 | void |
282 | vmm_attach_machdep(struct device *parent, struct device *self, void *aux) |
283 | { |
284 | struct vmm_softc *sc = (struct vmm_softc *)self; |
285 | struct cpu_info *ci; |
286 | CPU_INFO_ITERATOR cii; |
287 | |
288 | sc->sc_md.nr_rvi_cpus = 0; |
289 | sc->sc_md.nr_ept_cpus = 0; |
290 | |
291 | /* Calculate CPU features */ |
292 | CPU_INFO_FOREACH(cii, ci) { |
293 | if (ci->ci_vmm_flags & CI_VMM_RVI) |
294 | sc->sc_md.nr_rvi_cpus++; |
295 | if (ci->ci_vmm_flags & CI_VMM_EPT) |
296 | sc->sc_md.nr_ept_cpus++; |
297 | } |
298 | |
299 | sc->sc_md.pkru_enabled = 0; |
300 | if (rcr4() & CR4_PKE) |
301 | sc->sc_md.pkru_enabled = 1; |
302 | |
303 | if (sc->sc_md.nr_ept_cpus) { |
304 | printf(": VMX/EPT"); |
305 | sc->mode = VMM_MODE_EPT; |
306 | } else if (sc->sc_md.nr_rvi_cpus) { |
307 | printf(": SVM/RVI"); |
308 | sc->mode = VMM_MODE_RVI; |
309 | } else { |
310 | printf(": unknown"); |
311 | sc->mode = VMM_MODE_UNKNOWN; |
312 | } |
313 | |
314 | if (sc->mode == VMM_MODE_EPT) { |
315 | if (!(curcpu()->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr)) { |
316 | l1tf_flush_region = km_alloc(VMX_L1D_FLUSH_SIZE, |
317 | &kv_any, &vmm_kp_contig, &kd_waitok); |
318 | if (!l1tf_flush_region) { |
319 | printf(" (failing, no memory)"); |
320 | sc->mode = VMM_MODE_UNKNOWN; |
321 | } else { |
322 | printf(" (using slow L1TF mitigation)"); |
323 | memset(l1tf_flush_region, 0xcc, |
324 | VMX_L1D_FLUSH_SIZE); |
325 | } |
326 | } |
327 | } |
328 | |
329 | if (sc->mode == VMM_MODE_RVI) { |
330 | sc->max_vpid = curcpu()->ci_vmm_cap.vcc_svm.svm_max_asid; |
331 | } else { |
332 | sc->max_vpid = 0xFFF; |
333 | } |
334 | |
335 | bzero(&sc->vpids, sizeof(sc->vpids)); |
336 | rw_init(&sc->vpid_lock, "vpid"); |
337 | } |
338 | |
339 | /* |
340 | * vmm_quiesce_vmx |
341 | * |
342 | * Prepare the host for suspend by flushing all VMCS states. |
343 | */ |
344 | int |
345 | vmm_quiesce_vmx(void) |
346 | { |
347 | struct vm *vm; |
348 | struct vcpu *vcpu; |
349 | int err; |
350 | |
351 | /* |
352 | * We should be only called from a quiescing device state so we |
353 | * don't expect to sleep here. If we can't get all our locks, |
354 | * something is wrong. |
355 | */ |
356 | if ((err = rw_enter(&vmm_softc->vm_lock, RW_WRITE | RW_NOSLEEP))) |
357 | return (err); |
358 | |
359 | /* Iterate over each vm... */ |
360 | SLIST_FOREACH(vm, &vmm_softc->vm_list, vm_link) { |
361 | /* Iterate over each vcpu... */ |
362 | SLIST_FOREACH(vcpu, &vm->vm_vcpu_list, vc_vcpu_link) { |
363 | err = rw_enter(&vcpu->vc_lock, RW_WRITE | RW_NOSLEEP); |
364 | if (err) |
365 | break; |
366 | |
367 | /* We can skip unlaunched VMCS. Nothing to flush. */ |
368 | if (atomic_load_int(&vcpu->vc_vmx_vmcs_state) |
369 | != VMCS_LAUNCHED) { |
370 | DPRINTF("%s: skipping vcpu %d for vm %d\n", |
371 | __func__, vcpu->vc_id, vm->vm_id); |
372 | rw_exit_write(&vcpu->vc_lock); |
373 | continue; |
374 | } |
375 | |
376 | #ifdef MULTIPROCESSOR |
377 | if (vcpu->vc_last_pcpu != curcpu()) { |
378 | /* Remote cpu vmclear via ipi. */ |
379 | err = vmx_remote_vmclear(vcpu->vc_last_pcpu, |
380 | vcpu); |
381 | if (err) |
382 | printf("%s: failed to remote vmclear " |
383 | "vcpu %d of vm %d\n", __func__, |
384 | vcpu->vc_id, vm->vm_id); |
385 | } else |
386 | #endif |
387 | { |
388 | /* Local cpu vmclear instruction. */ |
389 | if ((err = vmclear(&vcpu->vc_control_pa))) |
390 | printf("%s: failed to locally vmclear " |
391 | "vcpu %d of vm %d\n", __func__, |
392 | vcpu->vc_id, vm->vm_id); |
393 | atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, |
394 | VMCS_CLEARED); |
395 | } |
396 | |
397 | rw_exit_write(&vcpu->vc_lock); |
398 | if (err) |
399 | break; |
400 | DPRINTF("%s: cleared vcpu %d for vm %d\n", __func__, |
401 | vcpu->vc_id, vm->vm_id); |
402 | } |
403 | if (err) |
404 | break; |
405 | } |
406 | rw_exit_write(&vmm_softc->vm_lock); |
407 | |
408 | if (err) |
409 | return (err); |
410 | return (0); |
411 | } |
412 | |
413 | void |
414 | vmm_activate_machdep(struct device *self, int act) |
415 | { |
416 | struct cpu_info *ci = curcpu(); |
417 | |
418 | switch (act) { |
419 | case DVACT_QUIESCE: |
420 | /* If we're not in vmm mode, nothing to do. */ |
421 | if ((ci->ci_flags & CPUF_VMM) == 0) |
422 | break; |
423 | |
424 | /* Intel systems need extra steps to sync vcpu state. */ |
425 | if (vmm_softc->mode == VMM_MODE_EPT) |
426 | if (vmm_quiesce_vmx()) |
427 | DPRINTF("%s: vmx quiesce failed\n", __func__); |
428 | |
429 | /* Stop virtualization mode on all cpus. */ |
430 | vmm_stop(); |
431 | break; |
432 | |
433 | case DVACT_WAKEUP: |
434 | /* Restart virtualization mode on all cpu's. */ |
435 | if (vmm_softc->vm_ct > 0) |
436 | vmm_start(); |
437 | break; |
438 | } |
439 | } |
440 | |
441 | int |
442 | vmmioctl_machdep(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) |
443 | { |
444 | int ret; |
445 | |
446 | switch (cmd) { |
447 | case VMM_IOC_INTR: |
448 | ret = vm_intr_pending((struct vm_intr_params *)data); |
449 | break; |
450 | case VMM_IOC_MPROTECT_EPT: |
451 | ret = vm_mprotect_ept((struct vm_mprotect_ept_params *)data); |
452 | break; |
453 | default: |
454 | DPRINTF("%s: unknown ioctl code 0x%lx\n", __func__, cmd); |
455 | ret = ENOTTY; |
456 | } |
457 | |
458 | return (ret); |
459 | } |
460 | |
461 | int |
462 | pledge_ioctl_vmm_machdep(struct proc *p, long com) |
463 | { |
464 | switch (com) { |
465 | case VMM_IOC_INTR: |
466 | case VMM_IOC_MPROTECT_EPT: |
467 | return (0); |
468 | } |
469 | |
470 | return (EPERM); |
471 | } |
472 | |
473 | /* |
474 | * vm_intr_pending |
475 | * |
476 | * IOCTL handler routine for VMM_IOC_INTR messages, sent from vmd when an |
477 | * interrupt is pending and needs acknowledgment |
478 | * |
479 | * Parameters: |
480 | * vip: Describes the vm/vcpu for which the interrupt is pending |
481 | * |
482 | * Return values: |
483 | * 0: if successful |
484 | * ENOENT: if the VM/VCPU defined by 'vip' cannot be found |
485 | */ |
486 | int |
487 | vm_intr_pending(struct vm_intr_params *vip) |
488 | { |
489 | struct vm *vm; |
490 | struct vcpu *vcpu; |
491 | #ifdef MULTIPROCESSOR |
492 | struct cpu_info *ci; |
493 | #endif |
494 | int error, ret = 0; |
495 | |
496 | /* Find the desired VM */ |
497 | error = vm_find(vip->vip_vm_id, &vm); |
498 | |
499 | /* Not found? exit. */ |
500 | if (error != 0) |
501 | return (error); |
502 | |
503 | vcpu = vm_find_vcpu(vm, vip->vip_vcpu_id); |
504 | |
505 | if (vcpu == NULL) { |
506 | ret = ENOENT; |
507 | goto out; |
508 | } |
509 | |
510 | vcpu->vc_intr = vip->vip_intr; |
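 | /* |
 |  * Annotation (not part of the original source): if the vcpu is currently |
 |  * running guest code on another cpu, the harmless IPI below forces a VM |
 |  * exit there so the vc_intr value set above is acted on promptly. |
 |  */ |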
511 | #ifdef MULTIPROCESSOR |
512 | ci = READ_ONCE(vcpu->vc_curcpu); |
513 | if (ci != NULL) |
514 | x86_send_ipi(ci, X86_IPI_NOP); |
515 | #endif |
516 | |
517 | out: |
518 | refcnt_rele_wake(&vm->vm_refcnt); |
519 | return (ret); |
520 | } |
521 | |
522 | /* |
523 | * vm_rwvmparams |
524 | * |
525 | * IOCTL handler to read/write the current vmm params like pvclock gpa, pvclock |
526 | * version, etc. |
527 | * |
528 | * Parameters: |
529 | * vrwp: Describes the VM and VCPU to get/set the params from |
530 | * dir: 0 for reading, 1 for writing |
531 | * |
532 | * Return values: |
533 | * 0: if successful |
534 | * ENOENT: if the VM/VCPU defined by 'vpp' cannot be found |
535 | * EINVAL: if an error occurred reading the registers of the guest |
536 | */ |
537 | int |
538 | vm_rwvmparams(struct vm_rwvmparams_params *vpp, int dir) |
539 | { |
540 | struct vm *vm; |
541 | struct vcpu *vcpu; |
542 | int error, ret = 0; |
543 | |
544 | /* Find the desired VM */ |
545 | error = vm_find(vpp->vpp_vm_id, &vm); |
546 | |
547 | /* Not found? exit. */ |
548 | if (error != 0) |
549 | return (error); |
550 | |
551 | vcpu = vm_find_vcpu(vm, vpp->vpp_vcpu_id); |
552 | |
553 | if (vcpu == NULL) { |
554 | ret = ENOENT; |
555 | goto out; |
556 | } |
557 | |
558 | if (dir == 0) { |
559 | if (vpp->vpp_mask & VM_RWVMPARAMS_PVCLOCK_VERSION) |
560 | vpp->vpp_pvclock_version = vcpu->vc_pvclock_version; |
561 | if (vpp->vpp_mask & VM_RWVMPARAMS_PVCLOCK_SYSTEM_GPA) |
562 | vpp->vpp_pvclock_system_gpa = \ |
563 | vcpu->vc_pvclock_system_gpa; |
564 | } else { |
565 | if (vpp->vpp_mask & VM_RWVMPARAMS_PVCLOCK_VERSION) |
566 | vcpu->vc_pvclock_version = vpp->vpp_pvclock_version; |
567 | if (vpp->vpp_mask & VM_RWVMPARAMS_PVCLOCK_SYSTEM_GPA) { |
568 | vmm_init_pvclock(vcpu, vpp->vpp_pvclock_system_gpa); |
569 | } |
570 | } |
571 | out: |
572 | refcnt_rele_wake(&vm->vm_refcnt); |
573 | return (ret); |
574 | } |
575 | |
576 | /* |
577 | * vm_readregs |
578 | * |
579 | * IOCTL handler to read/write the current register values of a guest VCPU. |
580 | * The VCPU must not be running. |
581 | * |
582 | * Parameters: |
583 | * vrwp: Describes the VM and VCPU to get/set the registers from. The |
584 | * register values are returned here as well. |
585 | * dir: 0 for reading, 1 for writing |
586 | * |
587 | * Return values: |
588 | * 0: if successful |
589 | * ENOENT: if the VM/VCPU defined by 'vrwp' cannot be found |
590 | * EINVAL: if an error occurred accessing the registers of the guest |
591 | * EPERM: if the vm cannot be accessed from the calling process |
592 | */ |
593 | int |
594 | vm_rwregs(struct vm_rwregs_params *vrwp, int dir) |
595 | { |
596 | struct vm *vm; |
597 | struct vcpu *vcpu; |
598 | struct vcpu_reg_state *vrs = &vrwp->vrwp_regs; |
599 | int error, ret = 0; |
600 | |
601 | /* Find the desired VM */ |
602 | error = vm_find(vrwp->vrwp_vm_id, &vm); |
603 | |
604 | /* Not found? exit. */ |
605 | if (error != 0) |
606 | return (error); |
607 | |
608 | vcpu = vm_find_vcpu(vm, vrwp->vrwp_vcpu_id); |
609 | |
610 | if (vcpu == NULL) { |
611 | ret = ENOENT; |
612 | goto out; |
613 | } |
614 | |
615 | rw_enter_write(&vcpu->vc_lock); |
616 | if (vmm_softc->mode == VMM_MODE_EPT) |
617 | ret = (dir == 0) ? |
618 | vcpu_readregs_vmx(vcpu, vrwp->vrwp_mask, 1, vrs) : |
619 | vcpu_writeregs_vmx(vcpu, vrwp->vrwp_mask, 1, vrs); |
620 | else if (vmm_softc->mode == VMM_MODE_RVI) |
621 | ret = (dir == 0) ? |
622 | vcpu_readregs_svm(vcpu, vrwp->vrwp_mask, vrs) : |
623 | vcpu_writeregs_svm(vcpu, vrwp->vrwp_mask, vrs); |
624 | else { |
625 | DPRINTF("%s: unknown vmm mode", __func__); |
626 | ret = EINVAL; |
627 | } |
628 | rw_exit_write(&vcpu->vc_lock); |
629 | out: |
630 | refcnt_rele_wake(&vm->vm_refcnt); |
631 | return (ret); |
632 | } |
633 | |
634 | /* |
635 | * vm_mprotect_ept |
636 | * |
637 | * IOCTL handler to sets the access protections of the ept |
638 | * |
639 | * Parameters: |
640 | * vmep: describes the memory for which the protect will be applied.. |
641 | * |
642 | * Return values: |
643 | * 0: if successful |
644 | * ENOENT: if the VM defined by 'vmep' cannot be found |
645 | * EINVAL: if the sgpa or size is not page aligned, the prot is invalid, |
646 | * size is too large (512GB), there is wraparound |
647 | * (like start = 512GB-1 and end = 512GB-2), |
648 | * the address specified is not within the vm's mem range |
649 | * or the address lies inside reserved (MMIO) memory |
650 | */ |
651 | int |
652 | vm_mprotect_ept(struct vm_mprotect_ept_params *vmep) |
653 | { |
654 | struct vm *vm; |
655 | struct vcpu *vcpu; |
656 | vaddr_t sgpa; |
657 | size_t size; |
658 | vm_prot_t prot; |
659 | uint64_t msr; |
660 | int ret = 0, memtype; |
661 | |
662 | /* If not EPT or RVI, nothing to do here */ |
663 | if (!(vmm_softc->mode == VMM_MODE_EPT |
664 | || vmm_softc->mode == VMM_MODE_RVI)) |
665 | return (0); |
666 | |
667 | /* Find the desired VM */ |
668 | ret = vm_find(vmep->vmep_vm_id, &vm); |
669 | |
670 | /* Not found? exit. */ |
671 | if (ret != 0) { |
672 | DPRINTF("%s: vm id %u not found\n", __func__, |
673 | vmep->vmep_vm_id); |
674 | return (ret); |
675 | } |
676 | |
677 | vcpu = vm_find_vcpu(vm, vmep->vmep_vcpu_id); |
678 | |
679 | if (vcpu == NULL) { |
680 | DPRINTF("%s: vcpu id %u of vm %u not found\n", __func__, |
681 | vmep->vmep_vcpu_id, vmep->vmep_vm_id); |
682 | ret = ENOENT; |
683 | goto out_nolock; |
684 | } |
685 | |
686 | rw_enter_write(&vcpu->vc_lock); |
687 | |
688 | if (vcpu->vc_state != VCPU_STATE_STOPPED) { |
689 | DPRINTF("%s: mprotect_ept %u on vm %u attempted " |
690 | "while vcpu was in state %u (%s)\n", __func__, |
691 | vmep->vmep_vcpu_id, vmep->vmep_vm_id, vcpu->vc_state, |
692 | vcpu_state_decode(vcpu->vc_state)); |
693 | ret = EBUSY; |
694 | goto out; |
695 | } |
696 | |
697 | /* Only proceed if the pmap is in the correct mode */ |
698 | KASSERT((vmm_softc->mode == VMM_MODE_EPT && |
699 | vm->vm_map->pmap->pm_type == PMAP_TYPE_EPT) || |
700 | (vmm_softc->mode == VMM_MODE_RVI && |
701 | vm->vm_map->pmap->pm_type == PMAP_TYPE_RVI)); |
702 | |
703 | sgpa = vmep->vmep_sgpa; |
704 | size = vmep->vmep_size; |
705 | prot = vmep->vmep_prot; |
706 | |
707 | /* No W^X permissions */ |
708 | if ((prot & PROT_MASK) != prot && |
709 | (prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC)) { |
710 | DPRINTF("%s: W+X permission requested\n", __func__); |
711 | ret = EINVAL; |
712 | goto out; |
713 | } |
714 | |
715 | /* No Write only permissions */ |
716 | if ((prot & (PROT_READ | PROT_WRITE | PROT_EXEC)) == PROT_WRITE) { |
717 | DPRINTF("%s: No Write only permissions\n", __func__); |
718 | ret = EINVAL; |
719 | goto out; |
720 | } |
721 | |
722 | /* No empty permissions */ |
723 | if (prot == 0) { |
724 | DPRINTF("%s: No empty permissions\n", __func__); |
725 | ret = EINVAL; |
726 | goto out; |
727 | } |
728 | |
729 | /* No execute only on EPT CPUs that don't have that capability */ |
730 | if (vmm_softc->mode == VMM_MODE_EPT) { |
731 | msr = rdmsr(IA32_VMX_EPT_VPID_CAP); |
732 | if (prot == PROT_EXEC && |
733 | (msr & IA32_EPT_VPID_CAP_XO_TRANSLATIONS) == 0) { |
734 | DPRINTF("%s: Execute only permissions unsupported," |
735 | " adding read permission\n", __func__); |
736 | |
737 | prot |= PROT_READ; |
738 | } |
739 | } |
740 | |
741 | /* Must be page aligned */ |
742 | if ((sgpa & PAGE_MASK) || (size & PAGE_MASK) || size == 0) { |
743 | ret = EINVAL; |
744 | goto out; |
745 | } |
746 | |
747 | /* size must be less than 512GB */ |
748 | if (size >= NBPD_L4) { |
749 | ret = EINVAL; |
750 | goto out; |
751 | } |
752 | |
753 | /* no wraparound */ |
754 | if (sgpa + size < sgpa) { |
755 | ret = EINVAL; |
756 | goto out; |
757 | } |
758 | |
759 | /* |
760 | * Specifying addresses within the PCI MMIO space is forbidden. |
761 | * Disallow addresses that start inside the MMIO space: |
762 | * [VMM_PCI_MMIO_BAR_BASE .. VMM_PCI_MMIO_BAR_END] |
763 | */ |
764 | if (sgpa >= VMM_PCI_MMIO_BAR_BASE && sgpa <= VMM_PCI_MMIO_BAR_END) { |
765 | ret = EINVAL; |
766 | goto out; |
767 | } |
768 | |
769 | /* |
770 | * ... and disallow addresses that end inside the MMIO space: |
771 | * (VMM_PCI_MMIO_BAR_BASE .. VMM_PCI_MMIO_BAR_END] |
772 | */ |
773 | if (sgpa + size > VMM_PCI_MMIO_BAR_BASE && |
774 | sgpa + size <= VMM_PCI_MMIO_BAR_END) { |
775 | ret = EINVAL; |
776 | goto out; |
777 | } |
778 | |
779 | memtype = vmm_get_guest_memtype(vm, sgpa); |
780 | if (memtype == VMM_MEM_TYPE_UNKNOWN) { |
781 | ret = EINVAL; |
782 | goto out; |
783 | } |
784 | |
785 | if (vmm_softc->mode == VMM_MODE_EPT) |
786 | ret = vmx_mprotect_ept(vm->vm_map, sgpa, sgpa + size, prot); |
787 | else if (vmm_softc->mode == VMM_MODE_RVI) { |
788 | pmap_write_protect(vm->vm_map->pmap, sgpa, sgpa + size, prot); |
789 | /* XXX requires a invlpga */ |
790 | ret = 0; |
791 | } else |
792 | ret = EINVAL; |
793 | out: |
794 | if (vcpu != NULL) |
795 | rw_exit_write(&vcpu->vc_lock); |
796 | out_nolock: |
797 | refcnt_rele_wake(&vm->vm_refcnt); |
798 | return (ret); |
799 | } |
800 | |
801 | /* |
802 | * vmx_mprotect_ept |
803 | * |
804 | * apply the ept protections to the requested pages, faulting in the page if |
805 | * required. |
806 | */ |
807 | int |
808 | vmx_mprotect_ept(vm_map_t vm_map, paddr_t sgpa, paddr_t egpa, int prot) |
809 | { |
810 | struct vmx_invept_descriptor vid; |
811 | pmap_t pmap; |
812 | pt_entry_t *pte; |
813 | paddr_t addr; |
814 | int ret = 0; |
815 | |
816 | pmap = vm_map->pmap; |
817 | |
818 | KERNEL_LOCK(); |
819 | |
820 | for (addr = sgpa; addr < egpa; addr += PAGE_SIZE) { |
821 | pte = vmx_pmap_find_pte_ept(pmap, addr); |
822 | if (pte == NULL) { |
823 | ret = uvm_fault(vm_map, addr, VM_FAULT_WIRE, |
824 | PROT_READ | PROT_WRITE | PROT_EXEC); |
825 | if (ret) |
826 | printf("%s: uvm_fault returns %d, GPA=0x%llx\n", |
827 | __func__, ret, (uint64_t)addr); |
828 | |
829 | pte = vmx_pmap_find_pte_ept(pmap, addr); |
830 | if (pte == NULL((void *)0)) { |
831 | KERNEL_UNLOCK()_kernel_unlock(); |
832 | return EFAULT14; |
833 | } |
834 | } |
835 | |
836 | if (prot & PROT_READ) |
837 | *pte |= EPT_R; |
838 | else |
839 | *pte &= ~EPT_R; |
840 | |
841 | if (prot & PROT_WRITE) |
842 | *pte |= EPT_W; |
843 | else |
844 | *pte &= ~EPT_W; |
845 | |
846 | if (prot & PROT_EXEC) |
847 | *pte |= EPT_X; |
848 | else |
849 | *pte &= ~EPT_X; |
850 | } |
851 | |
852 | /* |
853 | * SDM 3C: 28.3.3.4 Guidelines for Use of the INVEPT Instruction |
854 | * the first bullet point seems to say we should call invept. |
855 | * |
856 | * Software should use the INVEPT instruction with the “single-context” |
857 | * INVEPT type after making any of the following changes to an EPT |
858 | * paging-structure entry (the INVEPT descriptor should contain an |
859 | * EPTP value that references — directly or indirectly |
860 | * — the modified EPT paging structure): |
861 | * — Changing any of the privilege bits 2:0 from 1 to 0. |
862 | * */ |
863 | if (pmap->eptp != 0) { |
864 | memset(&vid, 0, sizeof(vid)); |
865 | vid.vid_eptp = pmap->eptp; |
866 | DPRINTF("%s: flushing EPT TLB for EPTP 0x%llx\n", __func__, |
867 | vid.vid_eptp); |
868 | invept(IA32_VMX_INVEPT_SINGLE_CTX, &vid); |
869 | } |
870 | |
871 | KERNEL_UNLOCK(); |
872 | |
873 | return ret; |
874 | } |
875 | |
876 | /* |
877 | * vmx_pmap_find_pte_ept |
878 | * |
879 | * find the page table entry specified by addr in the pmap supplied. |
880 | */ |
881 | pt_entry_t * |
882 | vmx_pmap_find_pte_ept(pmap_t pmap, paddr_t addr) |
883 | { |
884 | int l4idx, l3idx, l2idx, l1idx; |
885 | pd_entry_t *pd; |
886 | paddr_t pdppa; |
887 | pt_entry_t *ptes, *pte; |
888 | |
889 | l4idx = (addr & L4_MASK) >> L4_SHIFT; /* PML4E idx */ |
890 | l3idx = (addr & L3_MASK) >> L3_SHIFT; /* PDPTE idx */ |
891 | l2idx = (addr & L2_MASK) >> L2_SHIFT; /* PDE idx */ |
892 | l1idx = (addr & L1_MASK) >> L1_SHIFT; /* PTE idx */ |
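 | /* |
 |  * Annotation (not part of the original source), worked example: for a |
 |  * guest-physical address of 0x40201000 (1GB + 2MB + 4KB) the masks and |
 |  * shifts above give l4idx = 0, l3idx = 1, l2idx = 1, l1idx = 1, i.e. |
 |  * PML4 slot 0, PDPT slot 1, PD slot 1, PT slot 1. |
 |  */ |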
893 | |
894 | pd = (pd_entry_t *)pmap->pm_pdir; |
895 | if (pd == NULL) |
896 | return NULL; |
897 | |
898 | /* |
899 | * l4idx should always be 0 since we don't support more than 512GB |
900 | * guest physical memory. |
901 | */ |
902 | if (l4idx > 0) |
903 | return NULL; |
904 | |
905 | /* |
906 | * l3idx should always be < MAXDSIZ/1GB because we don't support more |
907 | * than MAXDSIZ guest phys mem. |
908 | */ |
909 | if (l3idx >= MAXDSIZ / ((paddr_t)1024 * 1024 * 1024)) |
910 | return NULL; |
911 | |
912 | pdppa = pd[l4idx] & PG_FRAME; |
913 | if (pdppa == 0) |
914 | return NULL; |
915 | |
916 | ptes = (pt_entry_t *)PMAP_DIRECT_MAP(pdppa); |
917 | |
918 | pdppa = ptes[l3idx] & PG_FRAME; |
919 | if (pdppa == 0) |
920 | return NULL; |
921 | |
922 | ptes = (pt_entry_t *)PMAP_DIRECT_MAP(pdppa); |
923 | |
924 | pdppa = ptes[l2idx] & PG_FRAME; |
925 | if (pdppa == 0) |
926 | return NULL; |
927 | |
928 | ptes = (pt_entry_t *)PMAP_DIRECT_MAP(pdppa); |
929 | |
930 | pte = &ptes[l1idx]; |
931 | if (*pte == 0) |
932 | return NULL; |
933 | |
934 | return pte; |
935 | } |
936 | |
937 | /* |
938 | * vmm_start |
939 | * |
940 | * Starts VMM mode on the system |
941 | */ |
942 | int |
943 | vmm_start(void) |
944 | { |
945 | struct cpu_info *self = curcpu(); |
946 | #ifdef MULTIPROCESSOR |
947 | struct cpu_info *ci; |
948 | CPU_INFO_ITERATOR cii; |
949 | #ifdef MP_LOCKDEBUG |
950 | int nticks; |
951 | #endif /* MP_LOCKDEBUG */ |
952 | #endif /* MULTIPROCESSOR */ |
953 | |
954 | /* VMM is already running */ |
955 | if (self->ci_flags & CPUF_VMM) |
956 | return (0); |
957 | |
958 | /* Start VMM on this CPU */ |
959 | start_vmm_on_cpu(self); |
960 | if (!(self->ci_flags & CPUF_VMM)) { |
961 | printf("%s: failed to enter VMM mode\n", |
962 | self->ci_dev->dv_xname); |
963 | return (EIO); |
964 | } |
965 | |
966 | #ifdef MULTIPROCESSOR |
967 | /* Broadcast start VMM IPI */ |
968 | x86_broadcast_ipi(X86_IPI_START_VMM); |
969 | |
970 | CPU_INFO_FOREACH(cii, ci) { |
971 | if (ci == self) |
972 | continue; |
973 | #ifdef MP_LOCKDEBUG |
974 | nticks = __mp_lock_spinout; |
975 | #endif /* MP_LOCKDEBUG */ |
976 | while (!(ci->ci_flags & CPUF_VMM)) { |
977 | CPU_BUSY_CYCLE(); |
978 | #ifdef MP_LOCKDEBUG |
979 | if (--nticks <= 0) { |
980 | db_printf("%s: spun out", __func__); |
981 | db_enter(); |
982 | nticks = __mp_lock_spinout; |
983 | } |
984 | #endif /* MP_LOCKDEBUG */ |
985 | } |
986 | } |
987 | #endif /* MULTIPROCESSOR */ |
988 | |
989 | return (0); |
990 | } |
991 | |
992 | /* |
993 | * vmm_stop |
994 | * |
995 | * Stops VMM mode on the system |
996 | */ |
997 | int |
998 | vmm_stop(void) |
999 | { |
1000 | struct cpu_info *self = curcpu(); |
1001 | #ifdef MULTIPROCESSOR |
1002 | struct cpu_info *ci; |
1003 | CPU_INFO_ITERATOR cii; |
1004 | #ifdef MP_LOCKDEBUG |
1005 | int nticks; |
1006 | #endif /* MP_LOCKDEBUG */ |
1007 | #endif /* MULTIPROCESSOR */ |
1008 | |
1009 | /* VMM is not running */ |
1010 | if (!(self->ci_flags & CPUF_VMM)) |
1011 | return (0); |
1012 | |
1013 | /* Stop VMM on this CPU */ |
1014 | stop_vmm_on_cpu(self); |
1015 | if (self->ci_flags & CPUF_VMM) { |
1016 | printf("%s: failed to exit VMM mode\n", |
1017 | self->ci_dev->dv_xname); |
1018 | return (EIO); |
1019 | } |
1020 | |
1021 | #ifdef MULTIPROCESSOR |
1022 | /* Stop VMM on other CPUs */ |
1023 | x86_broadcast_ipi(X86_IPI_STOP_VMM); |
1024 | |
1025 | CPU_INFO_FOREACH(cii, ci) { |
1026 | if (ci == self) |
1027 | continue; |
1028 | #ifdef MP_LOCKDEBUG |
1029 | nticks = __mp_lock_spinout; |
1030 | #endif /* MP_LOCKDEBUG */ |
1031 | while ((ci->ci_flags & CPUF_VMM)) { |
1032 | CPU_BUSY_CYCLE(); |
1033 | #ifdef MP_LOCKDEBUG |
1034 | if (--nticks <= 0) { |
1035 | db_printf("%s: spun out", __func__); |
1036 | db_enter(); |
1037 | nticks = __mp_lock_spinout; |
1038 | } |
1039 | #endif /* MP_LOCKDEBUG */ |
1040 | } |
1041 | } |
1042 | #endif /* MULTIPROCESSOR */ |
1043 | |
1044 | return (0); |
1045 | } |
1046 | |
1047 | /* |
1048 | * start_vmm_on_cpu |
1049 | * |
1050 | * Starts VMM mode on 'ci' by executing the appropriate CPU-specific insn |
1051 | * sequence to enter VMM mode (eg, VMXON) |
1052 | */ |
1053 | void |
1054 | start_vmm_on_cpu(struct cpu_info *ci) |
1055 | { |
1056 | uint64_t msr; |
1057 | uint32_t cr4; |
1058 | struct vmx_invept_descriptor vid; |
1059 | |
1060 | /* No VMM mode? exit. */ |
1061 | if ((ci->ci_vmm_flags & CI_VMM_VMX) == 0 && |
1062 | (ci->ci_vmm_flags & CI_VMM_SVM) == 0) |
1063 | return; |
1064 | |
1065 | /* |
1066 | * AMD SVM |
1067 | */ |
1068 | if (ci->ci_vmm_flags & CI_VMM_SVM) { |
1069 | msr = rdmsr(MSR_EFER); |
1070 | msr |= EFER_SVME; |
1071 | wrmsr(MSR_EFER, msr); |
1072 | } |
1073 | |
1074 | /* |
1075 | * Intel VMX |
1076 | */ |
1077 | if (ci->ci_vmm_flags & CI_VMM_VMX) { |
1078 | if (ci->ci_vmxon_region == 0) |
1079 | return; |
1080 | else { |
1081 | bzero(ci->ci_vmxon_region, PAGE_SIZE); |
1082 | ci->ci_vmxon_region->vr_revision = |
1083 | ci->ci_vmm_cap.vcc_vmx.vmx_vmxon_revision; |
1084 | |
1085 | /* Enable VMX */ |
1086 | msr = rdmsr(MSR_IA32_FEATURE_CONTROL); |
1087 | if (msr & IA32_FEATURE_CONTROL_LOCK) { |
1088 | if (!(msr & IA32_FEATURE_CONTROL_VMX_EN)) |
1089 | return; |
1090 | } else { |
1091 | msr |= IA32_FEATURE_CONTROL_VMX_EN | |
1092 | IA32_FEATURE_CONTROL_LOCK; |
1093 | wrmsr(MSR_IA32_FEATURE_CONTROL, msr); |
1094 | } |
1095 | |
1096 | /* Set CR4.VMXE */ |
1097 | cr4 = rcr4(); |
1098 | cr4 |= CR4_VMXE; |
1099 | lcr4(cr4); |
1100 | |
1101 | /* Enter VMX mode and clear EPTs on this cpu */ |
1102 | if (vmxon((uint64_t *)&ci->ci_vmxon_region_pa)) |
1103 | panic("vmxon failed"); |
1104 | |
1105 | memset(&vid, 0, sizeof(vid)); |
1106 | if (invept(IA32_VMX_INVEPT_GLOBAL_CTX, &vid)) |
1107 | panic("invept failed"); |
1108 | } |
1109 | } |
1110 | |
1111 | atomic_setbits_int(&ci->ci_flags, CPUF_VMM); |
1112 | } |
1113 | |
1114 | /* |
1115 | * stop_vmm_on_cpu |
1116 | * |
1117 | * Stops VMM mode on 'ci' by executing the appropriate CPU-specific insn |
1118 | * sequence to exit VMM mode (eg, VMXOFF) |
1119 | */ |
1120 | void |
1121 | stop_vmm_on_cpu(struct cpu_info *ci) |
1122 | { |
1123 | uint64_t msr; |
1124 | uint32_t cr4; |
1125 | |
1126 | if (!(ci->ci_flags & CPUF_VMM)) |
1127 | return; |
1128 | |
1129 | /* |
1130 | * AMD SVM |
1131 | */ |
1132 | if (ci->ci_vmm_flags & CI_VMM_SVM) { |
1133 | msr = rdmsr(MSR_EFER); |
1134 | msr &= ~EFER_SVME; |
1135 | wrmsr(MSR_EFER, msr); |
1136 | } |
1137 | |
1138 | /* |
1139 | * Intel VMX |
1140 | */ |
1141 | if (ci->ci_vmm_flags & CI_VMM_VMX) { |
1142 | if (vmxoff()) |
1143 | panic("VMXOFF failed"); |
1144 | |
1145 | cr4 = rcr4(); |
1146 | cr4 &= ~CR4_VMXE; |
1147 | lcr4(cr4); |
1148 | } |
1149 | |
1150 | atomic_clearbits_int(&ci->ci_flags, CPUF_VMM); |
1151 | } |
1152 | |
1153 | /* |
1154 | * vmclear_on_cpu |
1155 | * |
1156 | * Flush and clear VMCS on 'ci' by executing vmclear. |
1157 | * |
1158 | */ |
1159 | void |
1160 | vmclear_on_cpu(struct cpu_info *ci) |
1161 | { |
1162 | if ((ci->ci_flags & CPUF_VMM) && (ci->ci_vmm_flags & CI_VMM_VMX)) { |
1163 | if (vmclear(&ci->ci_vmcs_pa)) |
1164 | panic("VMCLEAR ipi failed"); |
1165 | atomic_swap_ulong(&ci->ci_vmcs_pa, VMX_VMCS_PA_CLEAR); |
1166 | } |
1167 | } |
1168 | |
1169 | #ifdef MULTIPROCESSOR |
1170 | static int |
1171 | vmx_remote_vmclear(struct cpu_info *ci, struct vcpu *vcpu) |
1172 | { |
1173 | #ifdef MP_LOCKDEBUG |
1174 | int nticks = __mp_lock_spinout; |
1175 | #endif /* MP_LOCKDEBUG */ |
1176 | |
1177 | rw_enter_write(&ci->ci_vmcs_lock); |
1178 | atomic_swap_ulong(&ci->ci_vmcs_pa, vcpu->vc_control_pa); |
1179 | x86_send_ipi(ci, X86_IPI_VMCLEAR_VMM); |
1180 | |
1181 | while (ci->ci_vmcs_pa != VMX_VMCS_PA_CLEAR) { |
1182 | CPU_BUSY_CYCLE(); |
1183 | #ifdef MP_LOCKDEBUG |
1184 | if (--nticks <= 0) { |
1185 | db_printf("%s: spun out\n", __func__); |
1186 | db_enter(); |
1187 | nticks = __mp_lock_spinout; |
1188 | } |
1189 | #endif /* MP_LOCKDEBUG */ |
1190 | } |
1191 | atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, VMCS_CLEARED); |
1192 | rw_exit_write(&ci->ci_vmcs_lock); |
1193 | |
1194 | return (0); |
1195 | } |
1196 | #endif /* MULTIPROCESSOR */ |
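 | /* |
 |  * Annotation (not part of the original source): the handshake above |
 |  * publishes the target VMCS physical address in ci_vmcs_pa, sends the |
 |  * VMCLEAR ipi, then spins until the remote cpu's handler (see |
 |  * vmclear_on_cpu() above) has executed vmclear and reset ci_vmcs_pa to |
 |  * VMX_VMCS_PA_CLEAR. |
 |  */ |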
1197 | |
1198 | /* |
1199 | * vm_impl_init_vmx |
1200 | * |
1201 | * Intel VMX specific VM initialization routine |
1202 | * |
1203 | * Parameters: |
1204 | * vm: the VM being initialized |
1205 | * p: vmd process owning the VM |
1206 | * |
1207 | * Return values: |
1208 | * 0: the initialization was successful |
1209 | * ENOMEM: the initialization failed (lack of resources) |
1210 | */ |
1211 | int |
1212 | vm_impl_init_vmx(struct vm *vm, struct proc *p) |
1213 | { |
1214 | int i, ret; |
1215 | vaddr_t mingpa, maxgpa; |
1216 | struct vm_mem_range *vmr; |
1217 | |
1218 | /* If not EPT, nothing to do here */ |
1219 | if (vmm_softc->mode != VMM_MODE_EPT) |
1220 | return (0); |
1221 | |
1222 | vmr = &vm->vm_memranges[0]; |
1223 | mingpa = vmr->vmr_gpa; |
1224 | vmr = &vm->vm_memranges[vm->vm_nmemranges - 1]; |
1225 | maxgpa = vmr->vmr_gpa + vmr->vmr_size; |
1226 | |
1227 | /* |
1228 | * uvmspace_alloc (currently) always returns a valid vmspace |
1229 | */ |
1230 | vm->vm_vmspace = uvmspace_alloc(mingpa, maxgpa, TRUE, FALSE); |
1231 | vm->vm_map = &vm->vm_vmspace->vm_map; |
1232 | |
1233 | /* Map the new map with an anon */ |
1234 | DPRINTF("%s: created vm_map @ %p\n", __func__, vm->vm_map); |
1235 | for (i = 0; i < vm->vm_nmemranges; i++) { |
1236 | vmr = &vm->vm_memranges[i]; |
1237 | ret = uvm_share(vm->vm_map, vmr->vmr_gpa, |
1238 | PROT_READ | PROT_WRITE | PROT_EXEC, |
1239 | &p->p_vmspace->vm_map, vmr->vmr_va, vmr->vmr_size); |
1240 | if (ret) { |
1241 | printf("%s: uvm_share failed (%d)\n", __func__, ret); |
1242 | /* uvmspace_free calls pmap_destroy for us */ |
1243 | uvmspace_free(vm->vm_vmspace); |
1244 | vm->vm_vmspace = NULL; |
1245 | return (ENOMEM); |
1246 | } |
1247 | } |
1248 | |
1249 | pmap_convert(vm->vm_map->pmap, PMAP_TYPE_EPT); |
1250 | |
1251 | return (0); |
1252 | } |
1253 | |
1254 | /* |
1255 | * vm_impl_init_svm |
1256 | * |
1257 | * AMD SVM specific VM initialization routine |
1258 | * |
1259 | * Parameters: |
1260 | * vm: the VM being initialized |
1261 | * p: vmd process owning the VM |
1262 | * |
1263 | * Return values: |
1264 | * 0: the initialization was successful |
1265 | * ENOMEM: the initialization failed (lack of resources) |
1266 | */ |
1267 | int |
1268 | vm_impl_init_svm(struct vm *vm, struct proc *p) |
1269 | { |
1270 | int i, ret; |
1271 | vaddr_t mingpa, maxgpa; |
1272 | struct vm_mem_range *vmr; |
1273 | |
1274 | /* If not RVI, nothing to do here */ |
1275 | if (vmm_softc->mode != VMM_MODE_RVI) |
1276 | return (0); |
1277 | |
1278 | vmr = &vm->vm_memranges[0]; |
1279 | mingpa = vmr->vmr_gpa; |
1280 | vmr = &vm->vm_memranges[vm->vm_nmemranges - 1]; |
1281 | maxgpa = vmr->vmr_gpa + vmr->vmr_size; |
1282 | |
1283 | /* |
1284 | * uvmspace_alloc (currently) always returns a valid vmspace |
1285 | */ |
1286 | vm->vm_vmspace = uvmspace_alloc(mingpa, maxgpa, TRUE, FALSE); |
1287 | vm->vm_map = &vm->vm_vmspace->vm_map; |
1288 | |
1289 | /* Map the new map with an anon */ |
1290 | DPRINTF("%s: created vm_map @ %p\n", __func__, vm->vm_map); |
1291 | for (i = 0; i < vm->vm_nmemranges; i++) { |
1292 | vmr = &vm->vm_memranges[i]; |
1293 | ret = uvm_share(vm->vm_map, vmr->vmr_gpa, |
1294 | PROT_READ | PROT_WRITE | PROT_EXEC, |
1295 | &p->p_vmspace->vm_map, vmr->vmr_va, vmr->vmr_size); |
1296 | if (ret) { |
1297 | printf("%s: uvm_share failed (%d)\n", __func__, ret); |
1298 | /* uvmspace_free calls pmap_destroy for us */ |
1299 | uvmspace_free(vm->vm_vmspace); |
1300 | vm->vm_vmspace = NULL; |
1301 | return (ENOMEM); |
1302 | } |
1303 | } |
1304 | |
1305 | /* Convert pmap to RVI */ |
1306 | pmap_convert(vm->vm_map->pmap, PMAP_TYPE_RVI); |
1307 | |
1308 | return (0); |
1309 | } |
1310 | |
1311 | /* |
1312 | * vm_impl_init |
1313 | * |
1314 | * Calls the architecture-specific VM init routine |
1315 | * |
1316 | * Parameters: |
1317 | * vm: the VM being initialized |
1318 | * p: vmd process owning the VM |
1319 | * |
1320 | * Return values (from architecture-specific init routines): |
1321 | * 0: the initialization was successful |
1322 | * ENOMEM: the initialization failed (lack of resources) |
1323 | */ |
1324 | int |
1325 | vm_impl_init(struct vm *vm, struct proc *p) |
1326 | { |
1327 | int ret; |
1328 | |
1329 | KERNEL_LOCK(); |
1330 | if (vmm_softc->mode == VMM_MODE_EPT) |
1331 | ret = vm_impl_init_vmx(vm, p); |
1332 | else if (vmm_softc->mode == VMM_MODE_RVI) |
1333 | ret = vm_impl_init_svm(vm, p); |
1334 | else |
1335 | panic("%s: unknown vmm mode: %d", __func__, vmm_softc->mode); |
1336 | KERNEL_UNLOCK(); |
1337 | |
1338 | return (ret); |
1339 | } |
1340 | |
1341 | void |
1342 | vm_impl_deinit(struct vm *vm) |
1343 | { |
1344 | /* unused */ |
1345 | } |
1346 | |
1347 | /* |
1348 | * vcpu_reload_vmcs_vmx |
1349 | * |
1350 | * (Re)load the VMCS on the current cpu. Must be called with the VMCS write |
1351 | * lock acquired. If the VMCS is determined to be loaded on a remote cpu, an |
1352 | * ipi will be used to remotely flush it before loading the VMCS locally. |
1353 | * |
1354 | * Parameters: |
1355 | * vcpu: Pointer to the vcpu needing its VMCS |
1356 | * |
1357 | * Return values: |
1358 | * 0: if successful |
1359 | * EINVAL: an error occurred during flush or reload |
1360 | */ |
1361 | int |
1362 | vcpu_reload_vmcs_vmx(struct vcpu *vcpu) |
1363 | { |
1364 | struct cpu_info *ci, *last_ci; |
1365 | |
1366 | rw_assert_wrlock(&vcpu->vc_lock); |
1367 | |
1368 | ci = curcpu(); |
1369 | last_ci = vcpu->vc_last_pcpu; |
1370 | |
1371 | if (last_ci == NULL) { |
1372 | /* First launch */ |
1373 | if (vmclear(&vcpu->vc_control_pa)) |
1374 | return (EINVAL); |
1375 | atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, VMCS_CLEARED); |
1376 | #ifdef MULTIPROCESSOR |
1377 | } else if (last_ci != ci) { |
1378 | /* We've moved CPUs at some point, so remote VMCLEAR */ |
1379 | if (vmx_remote_vmclear(last_ci, vcpu)) |
1380 | return (EINVAL); |
1381 | KASSERT(vcpu->vc_vmx_vmcs_state == VMCS_CLEARED); |
1382 | #endif /* MULTIPROCESSOR */ |
1383 | } |
1384 | |
1385 | if (vmptrld(&vcpu->vc_control_pa)) { |
1386 | printf("%s: vmptrld\n", __func__); |
1387 | return (EINVAL); |
1388 | } |
1389 | |
1390 | return (0); |
1391 | } |
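 | /* |
 |  * Annotation (not part of the original source): a typical caller holds |
 |  * vc_lock and reloads the VMCS before touching guest state, e.g. (sketch): |
 |  * |
 |  *	uint64_t rip; |
 |  * |
 |  *	rw_enter_write(&vcpu->vc_lock); |
 |  *	if (vcpu_reload_vmcs_vmx(vcpu) == 0) |
 |  *		(void)vmread(VMCS_GUEST_IA32_RIP, &rip); |
 |  *	rw_exit_write(&vcpu->vc_lock); |
 |  */ |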
1392 | |
1393 | /* |
1394 | * vcpu_readregs_vmx |
1395 | * |
1396 | * Reads 'vcpu's registers |
1397 | * |
1398 | * Parameters: |
1399 | * vcpu: the vcpu to read register values from |
1400 | * regmask: the types of registers to read |
1401 | * loadvmcs: bit to indicate whether the VMCS has to be loaded first |
1402 | * vrs: output parameter where register values are stored |
1403 | * |
1404 | * Return values: |
1405 | * 0: if successful |
1406 | * EINVAL: an error reading registers occurred |
1407 | */ |
1408 | int |
1409 | vcpu_readregs_vmx(struct vcpu *vcpu, uint64_t regmask, int loadvmcs, |
1410 | struct vcpu_reg_state *vrs) |
1411 | { |
1412 | int i, ret = 0; |
1413 | uint64_t sel, limit, ar; |
1414 | uint64_t *gprs = vrs->vrs_gprs; |
1415 | uint64_t *crs = vrs->vrs_crs; |
1416 | uint64_t *msrs = vrs->vrs_msrs; |
1417 | uint64_t *drs = vrs->vrs_drs; |
1418 | struct vcpu_segment_info *sregs = vrs->vrs_sregs; |
1419 | struct vmx_msr_store *msr_store; |
1420 | |
1421 | if (loadvmcs) { |
1422 | if (vcpu_reload_vmcs_vmx(vcpu)) |
1423 | return (EINVAL); |
1424 | } |
1425 | |
1426 | #ifdef VMM_DEBUG |
1427 | /* VMCS should be loaded... */ |
1428 | paddr_t pa = 0ULL; |
1429 | if (vmptrst(&pa)) |
1430 | panic("%s: vmptrst", __func__); |
1431 | KASSERT(pa == vcpu->vc_control_pa); |
1432 | #endif /* VMM_DEBUG */ |
1433 | |
1434 | if (regmask & VM_RWREGS_GPRS) { |
1435 | gprs[VCPU_REGS_RAX] = vcpu->vc_gueststate.vg_rax; |
1436 | gprs[VCPU_REGS_RBX] = vcpu->vc_gueststate.vg_rbx; |
1437 | gprs[VCPU_REGS_RCX] = vcpu->vc_gueststate.vg_rcx; |
1438 | gprs[VCPU_REGS_RDX] = vcpu->vc_gueststate.vg_rdx; |
1439 | gprs[VCPU_REGS_RSI] = vcpu->vc_gueststate.vg_rsi; |
1440 | gprs[VCPU_REGS_RDI] = vcpu->vc_gueststate.vg_rdi; |
1441 | gprs[VCPU_REGS_R8] = vcpu->vc_gueststate.vg_r8; |
1442 | gprs[VCPU_REGS_R9] = vcpu->vc_gueststate.vg_r9; |
1443 | gprs[VCPU_REGS_R10] = vcpu->vc_gueststate.vg_r10; |
1444 | gprs[VCPU_REGS_R11] = vcpu->vc_gueststate.vg_r11; |
1445 | gprs[VCPU_REGS_R12] = vcpu->vc_gueststate.vg_r12; |
1446 | gprs[VCPU_REGS_R13] = vcpu->vc_gueststate.vg_r13; |
1447 | gprs[VCPU_REGS_R14] = vcpu->vc_gueststate.vg_r14; |
1448 | gprs[VCPU_REGS_R15] = vcpu->vc_gueststate.vg_r15; |
1449 | gprs[VCPU_REGS_RBP] = vcpu->vc_gueststate.vg_rbp; |
1450 | gprs[VCPU_REGS_RIP] = vcpu->vc_gueststate.vg_rip; |
1451 | if (vmread(VMCS_GUEST_IA32_RSP, &gprs[VCPU_REGS_RSP])) |
1452 | goto errout; |
1453 | if (vmread(VMCS_GUEST_IA32_RFLAGS, &gprs[VCPU_REGS_RFLAGS])) |
1454 | goto errout; |
1455 | } |
1456 | |
1457 | if (regmask & VM_RWREGS_SREGS) { |
1458 | for (i = 0; i < nitems(vmm_vmx_sreg_vmcs_fields); i++) { |
1459 | if (vmread(vmm_vmx_sreg_vmcs_fields[i].selid, &sel)) |
1460 | goto errout; |
1461 | if (vmread(vmm_vmx_sreg_vmcs_fields[i].limitid, &limit)) |
1462 | goto errout; |
1463 | if (vmread(vmm_vmx_sreg_vmcs_fields[i].arid, &ar)) |
1464 | goto errout; |
1465 | if (vmread(vmm_vmx_sreg_vmcs_fields[i].baseid, |
1466 | &sregs[i].vsi_base)) |
1467 | goto errout; |
1468 | |
1469 | sregs[i].vsi_sel = sel; |
1470 | sregs[i].vsi_limit = limit; |
1471 | sregs[i].vsi_ar = ar; |
1472 | } |
1473 | |
1474 | if (vmread(VMCS_GUEST_IA32_GDTR_LIMIT, &limit)) |
1475 | goto errout; |
1476 | if (vmread(VMCS_GUEST_IA32_GDTR_BASE, |
1477 | &vrs->vrs_gdtr.vsi_base)) |
1478 | goto errout; |
1479 | vrs->vrs_gdtr.vsi_limit = limit; |
1480 | |
1481 | if (vmread(VMCS_GUEST_IA32_IDTR_LIMIT, &limit)) |
1482 | goto errout; |
1483 | if (vmread(VMCS_GUEST_IA32_IDTR_BASE, |
1484 | &vrs->vrs_idtr.vsi_base)) |
1485 | goto errout; |
1486 | vrs->vrs_idtr.vsi_limit = limit; |
1487 | } |
1488 | |
1489 | if (regmask & VM_RWREGS_CRS) { |
1490 | crs[VCPU_REGS_CR2] = vcpu->vc_gueststate.vg_cr2; |
1491 | crs[VCPU_REGS_XCR0] = vcpu->vc_gueststate.vg_xcr0; |
1492 | if (vmread(VMCS_GUEST_IA32_CR0, &crs[VCPU_REGS_CR0])) |
1493 | goto errout; |
1494 | if (vmread(VMCS_GUEST_IA32_CR3, &crs[VCPU_REGS_CR3])) |
1495 | goto errout; |
1496 | if (vmread(VMCS_GUEST_IA32_CR4, &crs[VCPU_REGS_CR4])) |
1497 | goto errout; |
1498 | if (vmread(VMCS_GUEST_PDPTE0, &crs[VCPU_REGS_PDPTE0])) |
1499 | goto errout; |
1500 | if (vmread(VMCS_GUEST_PDPTE1, &crs[VCPU_REGS_PDPTE1])) |
1501 | goto errout; |
1502 | if (vmread(VMCS_GUEST_PDPTE2, &crs[VCPU_REGS_PDPTE2])) |
1503 | goto errout; |
1504 | if (vmread(VMCS_GUEST_PDPTE3, &crs[VCPU_REGS_PDPTE3])) |
1505 | goto errout; |
1506 | } |
1507 | |
1508 | msr_store = (struct vmx_msr_store *)vcpu->vc_vmx_msr_exit_save_va; |
1509 | |
1510 | if (regmask & VM_RWREGS_MSRS) { |
1511 | for (i = 0; i < VCPU_REGS_NMSRS; i++) { |
1512 | msrs[i] = msr_store[i].vms_data; |
1513 | } |
1514 | } |
1515 | |
1516 | if (regmask & VM_RWREGS_DRS) { |
1517 | drs[VCPU_REGS_DR0] = vcpu->vc_gueststate.vg_dr0; |
1518 | drs[VCPU_REGS_DR1] = vcpu->vc_gueststate.vg_dr1; |
1519 | drs[VCPU_REGS_DR2] = vcpu->vc_gueststate.vg_dr2; |
1520 | drs[VCPU_REGS_DR3] = vcpu->vc_gueststate.vg_dr3; |
1521 | drs[VCPU_REGS_DR6] = vcpu->vc_gueststate.vg_dr6; |
1522 | if (vmread(VMCS_GUEST_IA32_DR7, &drs[VCPU_REGS_DR7])) |
1523 | goto errout; |
1524 | } |
1525 | |
1526 | goto out; |
1527 | |
1528 | errout: |
1529 | ret = EINVAL; |
1530 | out: |
1531 | return (ret); |
1532 | } |
1533 | |
1534 | /* |
1535 | * vcpu_readregs_svm |
1536 | * |
1537 | * Reads 'vcpu's registers |
1538 | * |
1539 | * Parameters: |
1540 | * vcpu: the vcpu to read register values from |
1541 | * regmask: the types of registers to read |
1542 | * vrs: output parameter where register values are stored |
1543 | * |
1544 | * Return values: |
1545 | * 0: if successful |
1546 | */ |
1547 | int |
1548 | vcpu_readregs_svm(struct vcpu *vcpu, uint64_t regmask, |
1549 | struct vcpu_reg_state *vrs) |
1550 | { |
1551 | uint64_t *gprs = vrs->vrs_gprs; |
1552 | uint64_t *crs = vrs->vrs_crs; |
1553 | uint64_t *msrs = vrs->vrs_msrs; |
1554 | uint64_t *drs = vrs->vrs_drs; |
1555 | uint32_t attr; |
1556 | struct vcpu_segment_info *sregs = vrs->vrs_sregs; |
1557 | struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va; |
1558 | |
1559 | if (regmask & VM_RWREGS_GPRS0x1) { |
1560 | gprs[VCPU_REGS_RAX0] = vmcb->v_rax; |
1561 | gprs[VCPU_REGS_RBX3] = vcpu->vc_gueststate.vg_rbx; |
1562 | gprs[VCPU_REGS_RCX1] = vcpu->vc_gueststate.vg_rcx; |
1563 | gprs[VCPU_REGS_RDX2] = vcpu->vc_gueststate.vg_rdx; |
1564 | gprs[VCPU_REGS_RSI6] = vcpu->vc_gueststate.vg_rsi; |
1565 | gprs[VCPU_REGS_RDI7] = vcpu->vc_gueststate.vg_rdi; |
1566 | gprs[VCPU_REGS_R88] = vcpu->vc_gueststate.vg_r8; |
1567 | gprs[VCPU_REGS_R99] = vcpu->vc_gueststate.vg_r9; |
1568 | gprs[VCPU_REGS_R1010] = vcpu->vc_gueststate.vg_r10; |
1569 | gprs[VCPU_REGS_R1111] = vcpu->vc_gueststate.vg_r11; |
1570 | gprs[VCPU_REGS_R1212] = vcpu->vc_gueststate.vg_r12; |
1571 | gprs[VCPU_REGS_R1313] = vcpu->vc_gueststate.vg_r13; |
1572 | gprs[VCPU_REGS_R1414] = vcpu->vc_gueststate.vg_r14; |
1573 | gprs[VCPU_REGS_R1515] = vcpu->vc_gueststate.vg_r15; |
1574 | gprs[VCPU_REGS_RBP5] = vcpu->vc_gueststate.vg_rbp; |
1575 | gprs[VCPU_REGS_RIP16] = vmcb->v_rip; |
1576 | gprs[VCPU_REGS_RSP4] = vmcb->v_rsp; |
1577 | gprs[VCPU_REGS_RFLAGS17] = vmcb->v_rflags; |
1578 | } |
1579 | |
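/*
 * Worked example (illustrative): the VMCB keeps segment attributes in
 * AMD's packed 12-bit format, while vcpu_reg_state uses the VMX-style
 * access-rights layout, so the conversions below move bits 11:8 up to
 * bits 15:12.  For a typical long-mode code segment attribute of 0xa9b
 * (type 0xb, S, P, L, G):
 *   (0xa9b & 0xff) | ((0xa9b << 4) & 0xf000) == 0x9b | 0xa000 == 0xa09b
 */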
1580 | if (regmask & VM_RWREGS_SREGS0x2) { |
1581 | sregs[VCPU_REGS_CS1].vsi_sel = vmcb->v_cs.vs_sel; |
1582 | sregs[VCPU_REGS_CS1].vsi_limit = vmcb->v_cs.vs_lim; |
1583 | attr = vmcb->v_cs.vs_attr; |
1584 | sregs[VCPU_REGS_CS1].vsi_ar = (attr & 0xff) | ((attr << 4) & |
1585 | 0xf000); |
1586 | sregs[VCPU_REGS_CS1].vsi_base = vmcb->v_cs.vs_base; |
1587 | |
1588 | sregs[VCPU_REGS_DS3].vsi_sel = vmcb->v_ds.vs_sel; |
1589 | sregs[VCPU_REGS_DS3].vsi_limit = vmcb->v_ds.vs_lim; |
1590 | attr = vmcb->v_ds.vs_attr; |
1591 | sregs[VCPU_REGS_DS3].vsi_ar = (attr & 0xff) | ((attr << 4) & |
1592 | 0xf000); |
1593 | sregs[VCPU_REGS_DS3].vsi_base = vmcb->v_ds.vs_base; |
1594 | |
1595 | sregs[VCPU_REGS_ES0].vsi_sel = vmcb->v_es.vs_sel; |
1596 | sregs[VCPU_REGS_ES0].vsi_limit = vmcb->v_es.vs_lim; |
1597 | attr = vmcb->v_es.vs_attr; |
1598 | sregs[VCPU_REGS_ES0].vsi_ar = (attr & 0xff) | ((attr << 4) & |
1599 | 0xf000); |
1600 | sregs[VCPU_REGS_ES0].vsi_base = vmcb->v_es.vs_base; |
1601 | |
1602 | sregs[VCPU_REGS_FS4].vsi_sel = vmcb->v_fs.vs_sel; |
1603 | sregs[VCPU_REGS_FS4].vsi_limit = vmcb->v_fs.vs_lim; |
1604 | attr = vmcb->v_fs.vs_attr; |
1605 | sregs[VCPU_REGS_FS4].vsi_ar = (attr & 0xff) | ((attr << 4) & |
1606 | 0xf000); |
1607 | sregs[VCPU_REGS_FS4].vsi_base = vmcb->v_fs.vs_base; |
1608 | |
1609 | sregs[VCPU_REGS_GS5].vsi_sel = vmcb->v_gs.vs_sel; |
1610 | sregs[VCPU_REGS_GS5].vsi_limit = vmcb->v_gs.vs_lim; |
1611 | attr = vmcb->v_gs.vs_attr; |
1612 | sregs[VCPU_REGS_GS5].vsi_ar = (attr & 0xff) | ((attr << 4) & |
1613 | 0xf000); |
1614 | sregs[VCPU_REGS_GS5].vsi_base = vmcb->v_gs.vs_base; |
1615 | |
1616 | sregs[VCPU_REGS_SS2].vsi_sel = vmcb->v_ss.vs_sel; |
1617 | sregs[VCPU_REGS_SS2].vsi_limit = vmcb->v_ss.vs_lim; |
1618 | attr = vmcb->v_ss.vs_attr; |
1619 | sregs[VCPU_REGS_SS2].vsi_ar = (attr & 0xff) | ((attr << 4) & |
1620 | 0xf000); |
1621 | sregs[VCPU_REGS_SS2].vsi_base = vmcb->v_ss.vs_base; |
1622 | |
1623 | sregs[VCPU_REGS_LDTR6].vsi_sel = vmcb->v_ldtr.vs_sel; |
1624 | sregs[VCPU_REGS_LDTR6].vsi_limit = vmcb->v_ldtr.vs_lim; |
1625 | attr = vmcb->v_ldtr.vs_attr; |
1626 | sregs[VCPU_REGS_LDTR6].vsi_ar = (attr & 0xff) | ((attr << 4) |
1627 | & 0xf000); |
1628 | sregs[VCPU_REGS_LDTR6].vsi_base = vmcb->v_ldtr.vs_base; |
1629 | |
1630 | sregs[VCPU_REGS_TR7].vsi_sel = vmcb->v_tr.vs_sel; |
1631 | sregs[VCPU_REGS_TR7].vsi_limit = vmcb->v_tr.vs_lim; |
1632 | attr = vmcb->v_tr.vs_attr; |
1633 | sregs[VCPU_REGS_TR7].vsi_ar = (attr & 0xff) | ((attr << 4) & |
1634 | 0xf000); |
1635 | sregs[VCPU_REGS_TR7].vsi_base = vmcb->v_tr.vs_base; |
1636 | |
1637 | vrs->vrs_gdtr.vsi_limit = vmcb->v_gdtr.vs_lim; |
1638 | vrs->vrs_gdtr.vsi_base = vmcb->v_gdtr.vs_base; |
1639 | vrs->vrs_idtr.vsi_limit = vmcb->v_idtr.vs_lim; |
1640 | vrs->vrs_idtr.vsi_base = vmcb->v_idtr.vs_base; |
1641 | } |
1642 | |
1643 | if (regmask & VM_RWREGS_CRS0x4) { |
1644 | crs[VCPU_REGS_CR00] = vmcb->v_cr0; |
1645 | crs[VCPU_REGS_CR32] = vmcb->v_cr3; |
1646 | crs[VCPU_REGS_CR43] = vmcb->v_cr4; |
1647 | crs[VCPU_REGS_CR21] = vcpu->vc_gueststate.vg_cr2; |
1648 | crs[VCPU_REGS_XCR05] = vcpu->vc_gueststate.vg_xcr0; |
1649 | } |
1650 | |
1651 | if (regmask & VM_RWREGS_MSRS0x8) { |
1652 | msrs[VCPU_REGS_EFER0] = vmcb->v_efer; |
1653 | msrs[VCPU_REGS_STAR1] = vmcb->v_star; |
1654 | msrs[VCPU_REGS_LSTAR2] = vmcb->v_lstar; |
1655 | msrs[VCPU_REGS_CSTAR3] = vmcb->v_cstar; |
1656 | msrs[VCPU_REGS_SFMASK4] = vmcb->v_sfmask; |
1657 | msrs[VCPU_REGS_KGSBASE5] = vmcb->v_kgsbase; |
1658 | } |
1659 | |
1660 | if (regmask & VM_RWREGS_DRS0x10) { |
1661 | drs[VCPU_REGS_DR00] = vcpu->vc_gueststate.vg_dr0; |
1662 | drs[VCPU_REGS_DR11] = vcpu->vc_gueststate.vg_dr1; |
1663 | drs[VCPU_REGS_DR22] = vcpu->vc_gueststate.vg_dr2; |
1664 | drs[VCPU_REGS_DR33] = vcpu->vc_gueststate.vg_dr3; |
1665 | drs[VCPU_REGS_DR64] = vmcb->v_dr6; |
1666 | drs[VCPU_REGS_DR75] = vmcb->v_dr7; |
1667 | } |
1668 | |
1669 | return (0); |
1670 | } |
1671 | |
1672 | /* |
1673 | * vcpu_writeregs_vmx |
1674 | * |
1675 | * Writes VCPU registers |
1676 | * |
1677 | * Parameters: |
1678 | * vcpu: the vcpu whose registers are to be written
1679 | * regmask: the types of registers to write |
1680 | * loadvmcs: bit to indicate whether the VMCS has to be loaded first |
1681 | * vrs: the register values to write |
1682 | * |
1683 | * Return values: |
1684 | * 0: if successful |
1685 | * EINVAL: an error occurred writing registers
1686 | */ |
1687 | int |
1688 | vcpu_writeregs_vmx(struct vcpu *vcpu, uint64_t regmask, int loadvmcs, |
1689 | struct vcpu_reg_state *vrs) |
1690 | { |
1691 | int i, ret = 0; |
1692 | uint16_t sel; |
1693 | uint64_t limit, ar; |
1694 | uint64_t *gprs = vrs->vrs_gprs; |
1695 | uint64_t *crs = vrs->vrs_crs; |
1696 | uint64_t *msrs = vrs->vrs_msrs; |
1697 | uint64_t *drs = vrs->vrs_drs; |
1698 | struct vcpu_segment_info *sregs = vrs->vrs_sregs; |
1699 | struct vmx_msr_store *msr_store; |
1700 | |
1701 | if (loadvmcs) { |
1702 | if (vcpu_reload_vmcs_vmx(vcpu)) |
1703 | return (EINVAL22); |
1704 | } |
1705 | |
1706 | #ifdef VMM_DEBUG |
1707 | /* VMCS should be loaded... */ |
1708 | paddr_t pa = 0ULL; |
1709 | if (vmptrst(&pa)) |
1710 | panic("%s: vmptrst", __func__); |
1711 | KASSERT(pa == vcpu->vc_control_pa)((pa == vcpu->vc_control_pa) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/arch/amd64/amd64/vmm_machdep.c", 1711, "pa == vcpu->vc_control_pa" )); |
1712 | #endif /* VMM_DEBUG */ |
1713 | |
1714 | if (regmask & VM_RWREGS_GPRS0x1) { |
1715 | vcpu->vc_gueststate.vg_rax = gprs[VCPU_REGS_RAX0]; |
1716 | vcpu->vc_gueststate.vg_rbx = gprs[VCPU_REGS_RBX3]; |
1717 | vcpu->vc_gueststate.vg_rcx = gprs[VCPU_REGS_RCX1]; |
1718 | vcpu->vc_gueststate.vg_rdx = gprs[VCPU_REGS_RDX2]; |
1719 | vcpu->vc_gueststate.vg_rsi = gprs[VCPU_REGS_RSI6]; |
1720 | vcpu->vc_gueststate.vg_rdi = gprs[VCPU_REGS_RDI7]; |
1721 | vcpu->vc_gueststate.vg_r8 = gprs[VCPU_REGS_R88]; |
1722 | vcpu->vc_gueststate.vg_r9 = gprs[VCPU_REGS_R99]; |
1723 | vcpu->vc_gueststate.vg_r10 = gprs[VCPU_REGS_R1010]; |
1724 | vcpu->vc_gueststate.vg_r11 = gprs[VCPU_REGS_R1111]; |
1725 | vcpu->vc_gueststate.vg_r12 = gprs[VCPU_REGS_R1212]; |
1726 | vcpu->vc_gueststate.vg_r13 = gprs[VCPU_REGS_R1313]; |
1727 | vcpu->vc_gueststate.vg_r14 = gprs[VCPU_REGS_R1414]; |
1728 | vcpu->vc_gueststate.vg_r15 = gprs[VCPU_REGS_R1515]; |
1729 | vcpu->vc_gueststate.vg_rbp = gprs[VCPU_REGS_RBP5]; |
1730 | vcpu->vc_gueststate.vg_rip = gprs[VCPU_REGS_RIP16]; |
1731 | if (vmwrite(VMCS_GUEST_IA32_RIP0x681E, gprs[VCPU_REGS_RIP16])) |
1732 | goto errout; |
1733 | if (vmwrite(VMCS_GUEST_IA32_RSP0x681C, gprs[VCPU_REGS_RSP4])) |
1734 | goto errout; |
1735 | if (vmwrite(VMCS_GUEST_IA32_RFLAGS0x6820, gprs[VCPU_REGS_RFLAGS17])) |
1736 | goto errout; |
1737 | } |
1738 | |
1739 | if (regmask & VM_RWREGS_SREGS0x2) { |
1740 | for (i = 0; i < nitems(vmm_vmx_sreg_vmcs_fields)(sizeof((vmm_vmx_sreg_vmcs_fields)) / sizeof((vmm_vmx_sreg_vmcs_fields )[0])); i++) { |
1741 | sel = sregs[i].vsi_sel; |
1742 | limit = sregs[i].vsi_limit; |
1743 | ar = sregs[i].vsi_ar; |
1744 | |
1745 | if (vmwrite(vmm_vmx_sreg_vmcs_fields[i].selid, sel)) |
1746 | goto errout; |
1747 | if (vmwrite(vmm_vmx_sreg_vmcs_fields[i].limitid, limit)) |
1748 | goto errout; |
1749 | if (vmwrite(vmm_vmx_sreg_vmcs_fields[i].arid, ar)) |
1750 | goto errout; |
1751 | if (vmwrite(vmm_vmx_sreg_vmcs_fields[i].baseid, |
1752 | sregs[i].vsi_base)) |
1753 | goto errout; |
1754 | } |
1755 | |
1756 | if (vmwrite(VMCS_GUEST_IA32_GDTR_LIMIT0x4810, |
1757 | vrs->vrs_gdtr.vsi_limit)) |
1758 | goto errout; |
1759 | if (vmwrite(VMCS_GUEST_IA32_GDTR_BASE0x6816, |
1760 | vrs->vrs_gdtr.vsi_base)) |
1761 | goto errout; |
1762 | if (vmwrite(VMCS_GUEST_IA32_IDTR_LIMIT0x4812, |
1763 | vrs->vrs_idtr.vsi_limit)) |
1764 | goto errout; |
1765 | if (vmwrite(VMCS_GUEST_IA32_IDTR_BASE0x6818, |
1766 | vrs->vrs_idtr.vsi_base)) |
1767 | goto errout; |
1768 | } |
1769 | |
1770 | if (regmask & VM_RWREGS_CRS0x4) { |
1771 | vcpu->vc_gueststate.vg_xcr0 = crs[VCPU_REGS_XCR05]; |
1772 | if (vmwrite(VMCS_GUEST_IA32_CR00x6800, crs[VCPU_REGS_CR00])) |
1773 | goto errout; |
1774 | if (vmwrite(VMCS_GUEST_IA32_CR30x6802, crs[VCPU_REGS_CR32])) |
1775 | goto errout; |
1776 | if (vmwrite(VMCS_GUEST_IA32_CR40x6804, crs[VCPU_REGS_CR43])) |
1777 | goto errout; |
1778 | if (vmwrite(VMCS_GUEST_PDPTE00x280A, crs[VCPU_REGS_PDPTE06])) |
1779 | goto errout; |
1780 | if (vmwrite(VMCS_GUEST_PDPTE10x280C, crs[VCPU_REGS_PDPTE17])) |
1781 | goto errout; |
1782 | if (vmwrite(VMCS_GUEST_PDPTE20x280E, crs[VCPU_REGS_PDPTE28])) |
1783 | goto errout; |
1784 | if (vmwrite(VMCS_GUEST_PDPTE30x2810, crs[VCPU_REGS_PDPTE39])) |
1785 | goto errout; |
1786 | } |
1787 | |
1788 | msr_store = (struct vmx_msr_store *)vcpu->vc_vmx_msr_exit_save_va; |
1789 | |
1790 | if (regmask & VM_RWREGS_MSRS0x8) { |
1791 | for (i = 0; i < VCPU_REGS_NMSRS(6 + 1); i++) { |
1792 | msr_store[i].vms_data = msrs[i]; |
1793 | } |
1794 | } |
1795 | |
1796 | if (regmask & VM_RWREGS_DRS0x10) { |
1797 | vcpu->vc_gueststate.vg_dr0 = drs[VCPU_REGS_DR00]; |
1798 | vcpu->vc_gueststate.vg_dr1 = drs[VCPU_REGS_DR11]; |
1799 | vcpu->vc_gueststate.vg_dr2 = drs[VCPU_REGS_DR22]; |
1800 | vcpu->vc_gueststate.vg_dr3 = drs[VCPU_REGS_DR33]; |
1801 | vcpu->vc_gueststate.vg_dr6 = drs[VCPU_REGS_DR64]; |
1802 | if (vmwrite(VMCS_GUEST_IA32_DR70x681A, drs[VCPU_REGS_DR75])) |
1803 | goto errout; |
1804 | } |
1805 | |
1806 | goto out; |
1807 | |
1808 | errout: |
1809 | ret = EINVAL22; |
1810 | out: |
1811 | if (loadvmcs) { |
1812 | if (vmclear(&vcpu->vc_control_pa)) |
1813 | ret = EINVAL22; |
1814 | atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, VMCS_CLEARED)_atomic_swap_uint((&vcpu->vc_vmx_vmcs_state), (0)); |
1815 | } |
1816 | return (ret); |
1817 | } |
1818 | |
1819 | /* |
1820 | * vcpu_writeregs_svm |
1821 | * |
1822 | * Writes 'vcpu's registers |
1823 | * |
1824 | * Parameters: |
1825 | * vcpu: the vcpu whose registers are to be written
1826 | * regmask: the types of registers to write |
1827 | * vrs: the register values to write |
1828 | * |
1829 | * Return values: |
1830 | * 0: if successful |
1831 | * EINVAL: an error occurred writing registers
1832 | */ |
1833 | int |
1834 | vcpu_writeregs_svm(struct vcpu *vcpu, uint64_t regmask, |
1835 | struct vcpu_reg_state *vrs) |
1836 | { |
1837 | uint64_t *gprs = vrs->vrs_gprs; |
1838 | uint64_t *crs = vrs->vrs_crs; |
1839 | uint16_t attr; |
1840 | uint64_t *msrs = vrs->vrs_msrs; |
1841 | uint64_t *drs = vrs->vrs_drs; |
1842 | struct vcpu_segment_info *sregs = vrs->vrs_sregs; |
1843 | struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va; |
1844 | |
1845 | if (regmask & VM_RWREGS_GPRS0x1) { |
1846 | vcpu->vc_gueststate.vg_rax = gprs[VCPU_REGS_RAX0]; |
1847 | vcpu->vc_gueststate.vg_rbx = gprs[VCPU_REGS_RBX3]; |
1848 | vcpu->vc_gueststate.vg_rcx = gprs[VCPU_REGS_RCX1]; |
1849 | vcpu->vc_gueststate.vg_rdx = gprs[VCPU_REGS_RDX2]; |
1850 | vcpu->vc_gueststate.vg_rsi = gprs[VCPU_REGS_RSI6]; |
1851 | vcpu->vc_gueststate.vg_rdi = gprs[VCPU_REGS_RDI7]; |
1852 | vcpu->vc_gueststate.vg_r8 = gprs[VCPU_REGS_R88]; |
1853 | vcpu->vc_gueststate.vg_r9 = gprs[VCPU_REGS_R99]; |
1854 | vcpu->vc_gueststate.vg_r10 = gprs[VCPU_REGS_R1010]; |
1855 | vcpu->vc_gueststate.vg_r11 = gprs[VCPU_REGS_R1111]; |
1856 | vcpu->vc_gueststate.vg_r12 = gprs[VCPU_REGS_R1212]; |
1857 | vcpu->vc_gueststate.vg_r13 = gprs[VCPU_REGS_R1313]; |
1858 | vcpu->vc_gueststate.vg_r14 = gprs[VCPU_REGS_R1414]; |
1859 | vcpu->vc_gueststate.vg_r15 = gprs[VCPU_REGS_R1515]; |
1860 | vcpu->vc_gueststate.vg_rbp = gprs[VCPU_REGS_RBP5]; |
1861 | vcpu->vc_gueststate.vg_rip = gprs[VCPU_REGS_RIP16]; |
1862 | |
1863 | vmcb->v_rax = gprs[VCPU_REGS_RAX0]; |
1864 | vmcb->v_rip = gprs[VCPU_REGS_RIP16]; |
1865 | vmcb->v_rsp = gprs[VCPU_REGS_RSP4]; |
1866 | vmcb->v_rflags = gprs[VCPU_REGS_RFLAGS17]; |
1867 | } |
1868 | |
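/*
 * Illustrative note: the conversions below are the inverse of the ones
 * in vcpu_readregs_svm(), repacking the VMX-style access rights into
 * the VMCB's 12-bit attribute format, e.g.
 *   (0xa09b & 0xff) | ((0xa09b >> 4) & 0xf00) == 0x9b | 0xa00 == 0xa9b
 */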
1869 | if (regmask & VM_RWREGS_SREGS0x2) { |
1870 | vmcb->v_cs.vs_sel = sregs[VCPU_REGS_CS1].vsi_sel; |
1871 | vmcb->v_cs.vs_lim = sregs[VCPU_REGS_CS1].vsi_limit; |
1872 | attr = sregs[VCPU_REGS_CS1].vsi_ar; |
1873 | vmcb->v_cs.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00); |
1874 | vmcb->v_cs.vs_base = sregs[VCPU_REGS_CS1].vsi_base; |
1875 | vmcb->v_ds.vs_sel = sregs[VCPU_REGS_DS3].vsi_sel; |
1876 | vmcb->v_ds.vs_lim = sregs[VCPU_REGS_DS3].vsi_limit; |
1877 | attr = sregs[VCPU_REGS_DS3].vsi_ar; |
1878 | vmcb->v_ds.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00); |
1879 | vmcb->v_ds.vs_base = sregs[VCPU_REGS_DS3].vsi_base; |
1880 | vmcb->v_es.vs_sel = sregs[VCPU_REGS_ES0].vsi_sel; |
1881 | vmcb->v_es.vs_lim = sregs[VCPU_REGS_ES0].vsi_limit; |
1882 | attr = sregs[VCPU_REGS_ES0].vsi_ar; |
1883 | vmcb->v_es.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00); |
1884 | vmcb->v_es.vs_base = sregs[VCPU_REGS_ES0].vsi_base; |
1885 | vmcb->v_fs.vs_sel = sregs[VCPU_REGS_FS4].vsi_sel; |
1886 | vmcb->v_fs.vs_lim = sregs[VCPU_REGS_FS4].vsi_limit; |
1887 | attr = sregs[VCPU_REGS_FS4].vsi_ar; |
1888 | vmcb->v_fs.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00); |
1889 | vmcb->v_fs.vs_base = sregs[VCPU_REGS_FS4].vsi_base; |
1890 | vmcb->v_gs.vs_sel = sregs[VCPU_REGS_GS5].vsi_sel; |
1891 | vmcb->v_gs.vs_lim = sregs[VCPU_REGS_GS5].vsi_limit; |
1892 | attr = sregs[VCPU_REGS_GS5].vsi_ar; |
1893 | vmcb->v_gs.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00); |
1894 | vmcb->v_gs.vs_base = sregs[VCPU_REGS_GS5].vsi_base; |
1895 | vmcb->v_ss.vs_sel = sregs[VCPU_REGS_SS2].vsi_sel; |
1896 | vmcb->v_ss.vs_lim = sregs[VCPU_REGS_SS2].vsi_limit; |
1897 | attr = sregs[VCPU_REGS_SS2].vsi_ar; |
1898 | vmcb->v_ss.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00); |
1899 | vmcb->v_ss.vs_base = sregs[VCPU_REGS_SS2].vsi_base; |
1900 | vmcb->v_ldtr.vs_sel = sregs[VCPU_REGS_LDTR6].vsi_sel; |
1901 | vmcb->v_ldtr.vs_lim = sregs[VCPU_REGS_LDTR6].vsi_limit; |
1902 | attr = sregs[VCPU_REGS_LDTR6].vsi_ar; |
1903 | vmcb->v_ldtr.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00); |
1904 | vmcb->v_ldtr.vs_base = sregs[VCPU_REGS_LDTR6].vsi_base; |
1905 | vmcb->v_tr.vs_sel = sregs[VCPU_REGS_TR7].vsi_sel; |
1906 | vmcb->v_tr.vs_lim = sregs[VCPU_REGS_TR7].vsi_limit; |
1907 | attr = sregs[VCPU_REGS_TR7].vsi_ar; |
1908 | vmcb->v_tr.vs_attr = (attr & 0xff) | ((attr >> 4) & 0xf00); |
1909 | vmcb->v_tr.vs_base = sregs[VCPU_REGS_TR7].vsi_base; |
1910 | vmcb->v_gdtr.vs_lim = vrs->vrs_gdtr.vsi_limit; |
1911 | vmcb->v_gdtr.vs_base = vrs->vrs_gdtr.vsi_base; |
1912 | vmcb->v_idtr.vs_lim = vrs->vrs_idtr.vsi_limit; |
1913 | vmcb->v_idtr.vs_base = vrs->vrs_idtr.vsi_base; |
1914 | } |
1915 | |
1916 | if (regmask & VM_RWREGS_CRS0x4) { |
1917 | vmcb->v_cr0 = crs[VCPU_REGS_CR00]; |
1918 | vmcb->v_cr3 = crs[VCPU_REGS_CR32]; |
1919 | vmcb->v_cr4 = crs[VCPU_REGS_CR43]; |
1920 | vcpu->vc_gueststate.vg_cr2 = crs[VCPU_REGS_CR21]; |
1921 | vcpu->vc_gueststate.vg_xcr0 = crs[VCPU_REGS_XCR05]; |
1922 | } |
1923 | |
1924 | if (regmask & VM_RWREGS_MSRS0x8) { |
1925 | vmcb->v_efer |= msrs[VCPU_REGS_EFER0]; |
1926 | vmcb->v_star = msrs[VCPU_REGS_STAR1]; |
1927 | vmcb->v_lstar = msrs[VCPU_REGS_LSTAR2]; |
1928 | vmcb->v_cstar = msrs[VCPU_REGS_CSTAR3]; |
1929 | vmcb->v_sfmask = msrs[VCPU_REGS_SFMASK4]; |
1930 | vmcb->v_kgsbase = msrs[VCPU_REGS_KGSBASE5]; |
1931 | } |
1932 | |
1933 | if (regmask & VM_RWREGS_DRS0x10) { |
1934 | vcpu->vc_gueststate.vg_dr0 = drs[VCPU_REGS_DR00]; |
1935 | vcpu->vc_gueststate.vg_dr1 = drs[VCPU_REGS_DR11]; |
1936 | vcpu->vc_gueststate.vg_dr2 = drs[VCPU_REGS_DR22]; |
1937 | vcpu->vc_gueststate.vg_dr3 = drs[VCPU_REGS_DR33]; |
1938 | vmcb->v_dr6 = drs[VCPU_REGS_DR64]; |
1939 | vmcb->v_dr7 = drs[VCPU_REGS_DR75]; |
1940 | } |
1941 | |
1942 | return (0); |
1943 | } |
1944 | |
1945 | /* |
1946 | * vcpu_reset_regs_svm |
1947 | * |
1948 | * Initializes 'vcpu's registers to supplied state |
1949 | * |
1950 | * Parameters: |
1951 | * vcpu: the vcpu whose register state is to be initialized |
1952 | * vrs: the register state to set |
1953 | * |
1954 | * Return values: |
1955 | * 0: registers init'ed successfully |
1956 | * EINVAL: an error occurred setting register state |
1957 | */ |
1958 | int |
1959 | vcpu_reset_regs_svm(struct vcpu *vcpu, struct vcpu_reg_state *vrs) |
1960 | { |
1961 | struct vmcb *vmcb; |
1962 | int ret; |
1963 | uint16_t asid; |
1964 | |
1965 | vmcb = (struct vmcb *)vcpu->vc_control_va; |
1966 | |
1967 | /* |
1968 | * Intercept controls |
1969 | * |
1970 | * External Interrupt exiting (SVM_INTERCEPT_INTR) |
1971 | * External NMI exiting (SVM_INTERCEPT_NMI) |
1972 | * CPUID instruction (SVM_INTERCEPT_CPUID) |
1973 | * HLT instruction (SVM_INTERCEPT_HLT) |
1974 | * I/O instructions (SVM_INTERCEPT_INOUT) |
1975 | * MSR access (SVM_INTERCEPT_MSR) |
1976 | * shutdown events (SVM_INTERCEPT_SHUTDOWN) |
1977 | * |
1978 | * VMRUN instruction (SVM_INTERCEPT_VMRUN) |
1979 | * VMMCALL instruction (SVM_INTERCEPT_VMMCALL) |
1980 | * VMLOAD instruction (SVM_INTERCEPT_VMLOAD) |
1981 | * VMSAVE instruction (SVM_INTERCEPT_VMSAVE) |
1982 | * STGI instruction (SVM_INTERCEPT_STGI) |
1983 | * CLGI instruction (SVM_INTERCEPT_CLGI) |
1984 | * SKINIT instruction (SVM_INTERCEPT_SKINIT) |
1985 | * ICEBP instruction (SVM_INTERCEPT_ICEBP) |
1986 | * MWAIT instruction (SVM_INTERCEPT_MWAIT_UNCOND) |
1987 | * MWAIT instruction (SVM_INTERCEPT_MWAIT_COND) |
1988 | * MONITOR instruction (SVM_INTERCEPT_MONITOR) |
1989 | * RDTSCP instruction (SVM_INTERCEPT_RDTSCP) |
1990 | * INVLPGA instruction (SVM_INTERCEPT_INVLPGA) |
1991 | * XSETBV instruction (SVM_INTERCEPT_XSETBV) (if available) |
1992 | */ |
1993 | vmcb->v_intercept1 = SVM_INTERCEPT_INTR(1UL << 0) | SVM_INTERCEPT_NMI(1UL << 1) | |
1994 | SVM_INTERCEPT_CPUID(1UL << 18) | SVM_INTERCEPT_HLT(1UL << 24) | SVM_INTERCEPT_INOUT(1UL << 27) | |
1995 | SVM_INTERCEPT_MSR(1UL << 28) | SVM_INTERCEPT_SHUTDOWN(1UL << 31); |
1996 | |
1997 | vmcb->v_intercept2 = SVM_INTERCEPT_VMRUN(1UL << 0) | SVM_INTERCEPT_VMMCALL(1UL << 1) | |
1998 | SVM_INTERCEPT_VMLOAD(1UL << 2) | SVM_INTERCEPT_VMSAVE(1UL << 3) | SVM_INTERCEPT_STGI(1UL << 4) | |
1999 | SVM_INTERCEPT_CLGI(1UL << 5) | SVM_INTERCEPT_SKINIT(1UL << 6) | SVM_INTERCEPT_ICEBP(1UL << 8) | |
2000 | SVM_INTERCEPT_MWAIT_UNCOND(1UL << 11) | SVM_INTERCEPT_MONITOR(1UL << 10) | |
2001 | SVM_INTERCEPT_MWAIT_COND(1UL << 12) | SVM_INTERCEPT_RDTSCP(1UL << 7) | |
2002 | SVM_INTERCEPT_INVLPGA(1UL << 26); |
2003 | |
2004 | if (xsave_mask) |
2005 | vmcb->v_intercept2 |= SVM_INTERCEPT_XSETBV(1UL << 13); |
2006 | |
2007 | /* Setup I/O bitmap */ |
2008 | memset((uint8_t *)vcpu->vc_svm_ioio_va, 0xFF, 3 * PAGE_SIZE)__builtin_memset(((uint8_t *)vcpu->vc_svm_ioio_va), (0xFF) , (3 * (1 << 12))); |
2009 | vmcb->v_iopm_pa = (uint64_t)(vcpu->vc_svm_ioio_pa); |
2010 | |
2011 | /* Setup MSR bitmap */ |
2012 | memset((uint8_t *)vcpu->vc_msr_bitmap_va, 0xFF, 2 * PAGE_SIZE)__builtin_memset(((uint8_t *)vcpu->vc_msr_bitmap_va), (0xFF ), (2 * (1 << 12))); |
2013 | vmcb->v_msrpm_pa = (uint64_t)(vcpu->vc_msr_bitmap_pa); |
2014 | svm_setmsrbrw(vcpu, MSR_IA32_FEATURE_CONTROL0x03a); |
2015 | svm_setmsrbrw(vcpu, MSR_SYSENTER_CS0x174); |
2016 | svm_setmsrbrw(vcpu, MSR_SYSENTER_ESP0x175); |
2017 | svm_setmsrbrw(vcpu, MSR_SYSENTER_EIP0x176); |
2018 | svm_setmsrbrw(vcpu, MSR_STAR0xc0000081); |
2019 | svm_setmsrbrw(vcpu, MSR_LSTAR0xc0000082); |
2020 | svm_setmsrbrw(vcpu, MSR_CSTAR0xc0000083); |
2021 | svm_setmsrbrw(vcpu, MSR_SFMASK0xc0000084); |
2022 | svm_setmsrbrw(vcpu, MSR_FSBASE0xc0000100); |
2023 | svm_setmsrbrw(vcpu, MSR_GSBASE0xc0000101); |
2024 | svm_setmsrbrw(vcpu, MSR_KERNELGSBASE0xc0000102); |
2025 | |
2026 | /* EFER is R/O so we can ensure the guest always has SVME */ |
2027 | svm_setmsrbr(vcpu, MSR_EFER0xc0000080); |
2028 | |
2029 | /* allow reading TSC */ |
2030 | svm_setmsrbr(vcpu, MSR_TSC0x010); |
2031 | |
2032 | /* allow reading HWCR and PSTATEDEF to determine TSC frequency */ |
2033 | svm_setmsrbr(vcpu, MSR_HWCR0xc0010015); |
2034 | svm_setmsrbr(vcpu, MSR_PSTATEDEF(0)(0xc0010064 + (0))); |
2035 | |
2036 | /* Guest VCPU ASID */ |
2037 | if (vmm_alloc_vpid(&asid)) { |
2038 | DPRINTF("%s: could not allocate asid\n", __func__); |
2039 | ret = EINVAL22; |
2040 | goto exit; |
2041 | } |
2042 | |
2043 | vmcb->v_asid = asid; |
2044 | vcpu->vc_vpid = asid; |
2045 | |
2046 | /* TLB Control - First time in, flush all */
2047 | vmcb->v_tlb_control = SVM_TLB_CONTROL_FLUSH_ALL1; |
2048 | |
2049 | /* INTR masking */ |
2050 | vmcb->v_intr_masking = 1; |
2051 | |
2052 | /* PAT */ |
2053 | vmcb->v_g_pat = PATENTRY(0, PAT_WB)(0x6UL << ((0) * 8)) | PATENTRY(1, PAT_WC)(0x1UL << ((1) * 8)) | |
2054 | PATENTRY(2, PAT_UCMINUS)(0x7UL << ((2) * 8)) | PATENTRY(3, PAT_UC)(0x0UL << ((3) * 8)) | |
2055 | PATENTRY(4, PAT_WB)(0x6UL << ((4) * 8)) | PATENTRY(5, PAT_WC)(0x1UL << ((5) * 8)) | |
2056 | PATENTRY(6, PAT_UCMINUS)(0x7UL << ((6) * 8)) | PATENTRY(7, PAT_UC)(0x0UL << ((7) * 8)); |
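/*
 * Illustrative note: with PATENTRY(n, type) == type << (n * 8), the
 * initializer above yields a guest PAT of 0x0007010600070106, i.e. the
 * same WB/WC/UC-/UC pattern repeated in the lower and upper halves.
 */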
2057 | |
2058 | /* NPT */ |
2059 | if (vmm_softc->mode == VMM_MODE_RVI) { |
2060 | vmcb->v_np_enable = 1; |
2061 | vmcb->v_n_cr3 = vcpu->vc_parent->vm_map->pmap->pm_pdirpa; |
2062 | } |
2063 | |
2064 | /* Enable SVME in EFER (must always be set) */ |
2065 | vmcb->v_efer |= EFER_SVME0x00001000; |
2066 | |
2067 | ret = vcpu_writeregs_svm(vcpu, VM_RWREGS_ALL(0x1 | 0x2 | 0x4 | 0x8 | 0x10), vrs); |
2068 | |
2069 | /* xcr0 power on default sets bit 0 (x87 state) */ |
2070 | vcpu->vc_gueststate.vg_xcr0 = XFEATURE_X870x00000001 & xsave_mask; |
2071 | |
2072 | vcpu->vc_parent->vm_map->pmap->eptp = 0; |
2073 | |
2074 | exit: |
2075 | return ret; |
2076 | } |
2077 | |
2078 | /* |
2079 | * svm_setmsrbr |
2080 | * |
2081 | * Allow read access to the specified msr on the supplied vcpu. |
2082 | * |
2083 | * Parameters: |
2084 | * vcpu: the VCPU to allow access |
2085 | * msr: the MSR number to allow access to |
2086 | */ |
2087 | void |
2088 | svm_setmsrbr(struct vcpu *vcpu, uint32_t msr) |
2089 | { |
2090 | uint8_t *msrs; |
2091 | uint16_t idx; |
2092 | |
2093 | msrs = (uint8_t *)vcpu->vc_msr_bitmap_va; |
2094 | |
2095 | /* |
2096 | * MSR Read bitmap layout: |
2097 | * Pentium MSRs (0x0 - 0x1fff) @ 0x0 |
2098 | * Gen6 and Syscall MSRs (0xc0000000 - 0xc0001fff) @ 0x800 |
2099 | * Gen7 and Gen8 MSRs (0xc0010000 - 0xc0011fff) @ 0x1000 |
2100 | * |
2101 | * Read enable bit is low order bit of 2-bit pair |
2102 | * per MSR (eg, MSR 0x0 read bit is at bit 0 @ 0x0)
2103 | */ |
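/*
 * Worked example (illustrative): for MSR_STAR (0xc0000081) the offset
 * into the second range is 0x81, so idx == 0x81 / 4 + 0x800 == 0x820
 * and SVM_MSRBIT_R(0x81) == 1 << ((0x81 % 4) * 2) == 0x04; clearing
 * that bit in msrs[0x820] permits guest reads of MSR_STAR.
 */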
2104 | if (msr <= 0x1fff) { |
2105 | idx = SVM_MSRIDX(msr)((msr) / 4); |
2106 | msrs[idx] &= ~(SVM_MSRBIT_R(msr)(1 << (((msr) % 4) * 2))); |
2107 | } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) { |
2108 | idx = SVM_MSRIDX(msr - 0xc0000000)((msr - 0xc0000000) / 4) + 0x800; |
2109 | msrs[idx] &= ~(SVM_MSRBIT_R(msr - 0xc0000000)(1 << (((msr - 0xc0000000) % 4) * 2))); |
2110 | } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) { |
2111 | idx = SVM_MSRIDX(msr - 0xc0010000)((msr - 0xc0010000) / 4) + 0x1000; |
2112 | msrs[idx] &= ~(SVM_MSRBIT_R(msr - 0xc0010000)(1 << (((msr - 0xc0010000) % 4) * 2))); |
2113 | } else { |
2114 | printf("%s: invalid msr 0x%x\n", __func__, msr); |
2115 | return; |
2116 | } |
2117 | } |
2118 | |
2119 | /* |
2120 | * svm_setmsrbw |
2121 | * |
2122 | * Allow write access to the specified msr on the supplied vcpu |
2123 | * |
2124 | * Parameters: |
2125 | * vcpu: the VCPU to allow access |
2126 | * msr: the MSR number to allow access to |
2127 | */ |
2128 | void |
2129 | svm_setmsrbw(struct vcpu *vcpu, uint32_t msr) |
2130 | { |
2131 | uint8_t *msrs; |
2132 | uint16_t idx; |
2133 | |
2134 | msrs = (uint8_t *)vcpu->vc_msr_bitmap_va; |
2135 | |
2136 | /* |
2137 | * MSR Write bitmap layout: |
2138 | * Pentium MSRs (0x0 - 0x1fff) @ 0x0 |
2139 | * Gen6 and Syscall MSRs (0xc0000000 - 0xc0001fff) @ 0x800 |
2140 | * Gen7 and Gen8 MSRs (0xc0010000 - 0xc0011fff) @ 0x1000 |
2141 | * |
2142 | * Write enable bit is high order bit of 2-bit pair |
2143 | * per MSR (eg, MSR 0x0 write bit is at bit 1 @ 0x0) |
2144 | */ |
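/*
 * Worked example (illustrative): for MSR_SYSENTER_CS (0x174),
 * idx == 0x174 / 4 == 0x5d and SVM_MSRBIT_W(0x174) ==
 * 1 << ((0x174 % 4) * 2 + 1) == 0x02; clearing that bit permits
 * guest writes of the MSR.
 */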
2145 | if (msr <= 0x1fff) { |
2146 | idx = SVM_MSRIDX(msr)((msr) / 4); |
2147 | msrs[idx] &= ~(SVM_MSRBIT_W(msr)(1 << (((msr) % 4) * 2 + 1))); |
2148 | } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) { |
2149 | idx = SVM_MSRIDX(msr - 0xc0000000)((msr - 0xc0000000) / 4) + 0x800; |
2150 | msrs[idx] &= ~(SVM_MSRBIT_W(msr - 0xc0000000)(1 << (((msr - 0xc0000000) % 4) * 2 + 1))); |
2151 | } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) { |
2152 | idx = SVM_MSRIDX(msr - 0xc0010000)((msr - 0xc0010000) / 4) + 0x1000; |
2153 | msrs[idx] &= ~(SVM_MSRBIT_W(msr - 0xc0010000)(1 << (((msr - 0xc0010000) % 4) * 2 + 1))); |
2154 | } else { |
2155 | printf("%s: invalid msr 0x%x\n", __func__, msr); |
2156 | return; |
2157 | } |
2158 | } |
2159 | |
2160 | /* |
2161 | * svm_setmsrbrw |
2162 | * |
2163 | * Allow read/write access to the specified msr on the supplied vcpu |
2164 | * |
2165 | * Parameters: |
2166 | * vcpu: the VCPU to allow access |
2167 | * msr: the MSR number to allow access to |
2168 | */ |
2169 | void |
2170 | svm_setmsrbrw(struct vcpu *vcpu, uint32_t msr) |
2171 | { |
2172 | svm_setmsrbr(vcpu, msr); |
2173 | svm_setmsrbw(vcpu, msr); |
2174 | } |
2175 | |
2176 | /* |
2177 | * vmx_setmsrbr |
2178 | * |
2179 | * Allow read access to the specified msr on the supplied vcpu. |
2180 | * |
2181 | * Parameters: |
2182 | * vcpu: the VCPU to allow access |
2183 | * msr: the MSR number to allow access to |
2184 | */ |
2185 | void |
2186 | vmx_setmsrbr(struct vcpu *vcpu, uint32_t msr) |
2187 | { |
2188 | uint8_t *msrs; |
2189 | uint16_t idx; |
2190 | |
2191 | msrs = (uint8_t *)vcpu->vc_msr_bitmap_va; |
2192 | |
2193 | /* |
2194 | * MSR Read bitmap layout: |
2195 | * "Low" MSRs (0x0 - 0x1fff) @ 0x0 |
2196 | * "High" MSRs (0xc0000000 - 0xc0001fff) @ 0x400 |
2197 | */ |
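/*
 * Worked example (illustrative): for MSR_LSTAR (0xc0000082) the offset
 * is 0x82, so idx == 0x82 / 8 + 0x400 == 0x410 and VMX_MSRBIT(0x82) ==
 * 1 << (0x82 % 8) == 0x04; clearing that bit permits guest reads.
 */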
2198 | if (msr <= 0x1fff) { |
2199 | idx = VMX_MSRIDX(msr)((msr) / 8); |
2200 | msrs[idx] &= ~(VMX_MSRBIT(msr)(1 << (msr) % 8)); |
2201 | } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) { |
2202 | idx = VMX_MSRIDX(msr - 0xc0000000)((msr - 0xc0000000) / 8) + 0x400; |
2203 | msrs[idx] &= ~(VMX_MSRBIT(msr - 0xc0000000)(1 << (msr - 0xc0000000) % 8)); |
2204 | } else |
2205 | printf("%s: invalid msr 0x%x\n", __func__, msr); |
2206 | } |
2207 | |
2208 | /* |
2209 | * vmx_setmsrbw |
2210 | * |
2211 | * Allow write access to the specified msr on the supplied vcpu |
2212 | * |
2213 | * Parameters: |
2214 | * vcpu: the VCPU to allow access |
2215 | * msr: the MSR number to allow access to |
2216 | */ |
2217 | void |
2218 | vmx_setmsrbw(struct vcpu *vcpu, uint32_t msr) |
2219 | { |
2220 | uint8_t *msrs; |
2221 | uint16_t idx; |
2222 | |
2223 | msrs = (uint8_t *)vcpu->vc_msr_bitmap_va; |
2224 | |
2225 | /* |
2226 | * MSR Write bitmap layout: |
2227 | * "Low" MSRs (0x0 - 0x1fff) @ 0x800 |
2228 | * "High" MSRs (0xc0000000 - 0xc0001fff) @ 0xc00 |
2229 | */ |
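/*
 * Worked example (illustrative): for MSR_EFER (0xc0000080) the offset
 * is 0x80, so idx == 0x80 / 8 + 0xc00 == 0xc10 and VMX_MSRBIT(0x80) ==
 * 1 << (0x80 % 8) == 0x01; clearing that bit permits guest writes.
 */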
2230 | if (msr <= 0x1fff) { |
2231 | idx = VMX_MSRIDX(msr)((msr) / 8) + 0x800; |
2232 | msrs[idx] &= ~(VMX_MSRBIT(msr)(1 << (msr) % 8)); |
2233 | } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) { |
2234 | idx = VMX_MSRIDX(msr - 0xc0000000)((msr - 0xc0000000) / 8) + 0xc00; |
2235 | msrs[idx] &= ~(VMX_MSRBIT(msr - 0xc0000000)(1 << (msr - 0xc0000000) % 8)); |
2236 | } else |
2237 | printf("%s: invalid msr 0x%x\n", __func__, msr); |
2238 | } |
2239 | |
2240 | /* |
2241 | * vmx_setmsrbrw |
2242 | * |
2243 | * Allow read/write access to the specified msr on the supplied vcpu |
2244 | * |
2245 | * Parameters: |
2246 | * vcpu: the VCPU to allow access |
2247 | * msr: the MSR number to allow access to |
2248 | */ |
2249 | void |
2250 | vmx_setmsrbrw(struct vcpu *vcpu, uint32_t msr) |
2251 | { |
2252 | vmx_setmsrbr(vcpu, msr); |
2253 | vmx_setmsrbw(vcpu, msr); |
2254 | } |
2255 | |
2256 | /* |
2257 | * svm_set_clean |
2258 | * |
2259 | * Sets (marks as unmodified) the VMCB clean bits set in 'value'.
2260 | * For example, to set the clean bit for the VMCB intercepts (bit position 0), |
2261 | * the caller provides 'SVM_CLEANBITS_I' (0x1) for the 'value' argument. |
2262 | * Multiple cleanbits can be provided in 'value' at the same time (eg, |
2263 | * "SVM_CLEANBITS_I | SVM_CLEANBITS_TPR"). |
2264 | * |
2265 | * Note that this function does not clear any bits; to clear bits in the |
2266 | * vmcb cleanbits bitfield, use 'svm_set_dirty'. |
2267 | * |
2268 | * Parameters: |
2269 | * vcpu: the VCPU whose VMCB clean bits should be set
2270 | * value: the value(s) to enable in the cleanbits mask |
2271 | */ |
2272 | void |
2273 | svm_set_clean(struct vcpu *vcpu, uint32_t value) |
2274 | { |
2275 | struct vmcb *vmcb; |
2276 | |
2277 | /* If no cleanbits support, do nothing */ |
2278 | if (!curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_svm.svm_vmcb_clean) |
2279 | return; |
2280 | |
2281 | vmcb = (struct vmcb *)vcpu->vc_control_va; |
2282 | |
2283 | vmcb->v_vmcb_clean_bits |= value; |
2284 | } |
2285 | |
2286 | /* |
2287 | * svm_set_dirty |
2288 | * |
2289 | * Clears (marks as modified) the VMCB clean bits set in 'value'.
2290 | * For example, to clear the bit for the VMCB intercepts (bit position 0) |
2291 | * the caller provides 'SVM_CLEANBITS_I' (0x1) for the 'value' argument. |
2292 | * Multiple dirty bits can be provided in 'value' at the same time (eg, |
2293 | * "SVM_CLEANBITS_I | SVM_CLEANBITS_TPR"). |
2294 | * |
2295 | * Parameters: |
2296 | * vcpu: the VCPU whose VMCB dirty bits should be set
2297 | * value: the value(s) to dirty in the cleanbits mask |
2298 | */ |
2299 | void |
2300 | svm_set_dirty(struct vcpu *vcpu, uint32_t value) |
2301 | { |
2302 | struct vmcb *vmcb; |
2303 | |
2304 | /* If no cleanbits support, do nothing */ |
2305 | if (!curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_svm.svm_vmcb_clean) |
2306 | return; |
2307 | |
2308 | vmcb = (struct vmcb *)vcpu->vc_control_va; |
2309 | |
2310 | vmcb->v_vmcb_clean_bits &= ~value; |
2311 | } |
2312 | |
2313 | /* |
2314 | * vcpu_reset_regs_vmx |
2315 | * |
2316 | * Initializes 'vcpu's registers to supplied state |
2317 | * |
2318 | * Parameters: |
2319 | * vcpu: the vcpu whose register state is to be initialized |
2320 | * vrs: the register state to set |
2321 | * |
2322 | * Return values: |
2323 | * 0: registers init'ed successfully |
2324 | * EINVAL: an error occurred setting register state |
2325 | */ |
2326 | int |
2327 | vcpu_reset_regs_vmx(struct vcpu *vcpu, struct vcpu_reg_state *vrs) |
2328 | { |
2329 | int ret = 0, ug = 0; |
2330 | uint32_t cr0, cr4; |
2331 | uint32_t pinbased, procbased, procbased2, exit, entry; |
2332 | uint32_t want1, want0; |
2333 | uint64_t ctrlval, cr3; |
2334 | uint16_t ctrl, vpid; |
2335 | struct vmx_msr_store *msr_store; |
2336 | |
2337 | rw_assert_wrlock(&vcpu->vc_lock); |
2338 | |
2339 | cr0 = vrs->vrs_crs[VCPU_REGS_CR00]; |
2340 | |
2341 | if (vcpu_reload_vmcs_vmx(vcpu)) { |
2342 | DPRINTF("%s: error reloading VMCS\n", __func__); |
2343 | ret = EINVAL22; |
2344 | goto exit; |
2345 | } |
2346 | |
2347 | #ifdef VMM_DEBUG |
2348 | /* VMCS should be loaded... */ |
2349 | paddr_t pa = 0ULL; |
2350 | if (vmptrst(&pa)) |
2351 | panic("%s: vmptrst", __func__); |
2352 | KASSERT(pa == vcpu->vc_control_pa)((pa == vcpu->vc_control_pa) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/arch/amd64/amd64/vmm_machdep.c", 2352, "pa == vcpu->vc_control_pa" )); |
2353 | #endif /* VMM_DEBUG */ |
2354 | |
2355 | /* Compute Basic Entry / Exit Controls */ |
2356 | vcpu->vc_vmx_basic = rdmsr(IA32_VMX_BASIC0x480); |
2357 | vcpu->vc_vmx_entry_ctls = rdmsr(IA32_VMX_ENTRY_CTLS0x484); |
2358 | vcpu->vc_vmx_exit_ctls = rdmsr(IA32_VMX_EXIT_CTLS0x483); |
2359 | vcpu->vc_vmx_pinbased_ctls = rdmsr(IA32_VMX_PINBASED_CTLS0x481); |
2360 | vcpu->vc_vmx_procbased_ctls = rdmsr(IA32_VMX_PROCBASED_CTLS0x482); |
2361 | |
2362 | /* Compute True Entry / Exit Controls (if applicable) */ |
2363 | if (vcpu->vc_vmx_basic & IA32_VMX_TRUE_CTLS_AVAIL(1ULL << 55)) { |
2364 | vcpu->vc_vmx_true_entry_ctls = rdmsr(IA32_VMX_TRUE_ENTRY_CTLS0x490); |
2365 | vcpu->vc_vmx_true_exit_ctls = rdmsr(IA32_VMX_TRUE_EXIT_CTLS0x48F); |
2366 | vcpu->vc_vmx_true_pinbased_ctls = |
2367 | rdmsr(IA32_VMX_TRUE_PINBASED_CTLS0x48D); |
2368 | vcpu->vc_vmx_true_procbased_ctls = |
2369 | rdmsr(IA32_VMX_TRUE_PROCBASED_CTLS0x48E); |
2370 | } |
2371 | |
2372 | /* Compute Secondary Procbased Controls (if applicable) */ |
2373 | if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED_CTLS0x482, |
2374 | IA32_VMX_ACTIVATE_SECONDARY_CONTROLS(1ULL << 31), 1)) |
2375 | vcpu->vc_vmx_procbased2_ctls = rdmsr(IA32_VMX_PROCBASED2_CTLS0x48B); |
2376 | |
2377 | /* |
2378 | * Pinbased ctrls |
2379 | * |
2380 | * We must be able to set the following: |
2381 | * IA32_VMX_EXTERNAL_INT_EXITING - exit on host interrupt |
2382 | * IA32_VMX_NMI_EXITING - exit on host NMI |
2383 | */ |
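/*
 * Background note (per the Intel SDM; vcpu_vmx_compute_ctrl() itself is
 * not shown here): each IA32_VMX_*_CTLS MSR reports its allowed
 * 0-settings in bits 31:0 and its allowed 1-settings in bits 63:32.
 * A control bit may be set only if its allowed-1 bit is 1 and cleared
 * only if its allowed-0 bit is 0, so compute_ctrl is expected to check
 * 'want1'/'want0' against those words and fold in any bits the MSR
 * forces on.  The same pattern repeats below for the procbased, exit
 * and entry controls.
 */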
2384 | want1 = IA32_VMX_EXTERNAL_INT_EXITING(1ULL << 0) | |
2385 | IA32_VMX_NMI_EXITING(1ULL << 3); |
2386 | want0 = 0; |
2387 | |
2388 | if (vcpu->vc_vmx_basic & IA32_VMX_TRUE_CTLS_AVAIL(1ULL << 55)) { |
2389 | ctrl = IA32_VMX_TRUE_PINBASED_CTLS0x48D; |
2390 | ctrlval = vcpu->vc_vmx_true_pinbased_ctls; |
2391 | } else { |
2392 | ctrl = IA32_VMX_PINBASED_CTLS0x481; |
2393 | ctrlval = vcpu->vc_vmx_pinbased_ctls; |
2394 | } |
2395 | |
2396 | if (vcpu_vmx_compute_ctrl(ctrlval, ctrl, want1, want0, &pinbased)) { |
2397 | DPRINTF("%s: error computing pinbased controls\n", __func__); |
2398 | ret = EINVAL22; |
2399 | goto exit; |
2400 | } |
2401 | |
2402 | if (vmwrite(VMCS_PINBASED_CTLS0x4000, pinbased)) { |
2403 | DPRINTF("%s: error setting pinbased controls\n", __func__); |
2404 | ret = EINVAL22; |
2405 | goto exit; |
2406 | } |
2407 | |
2408 | /* |
2409 | * Procbased ctrls |
2410 | * |
2411 | * We must be able to set the following: |
2412 | * IA32_VMX_HLT_EXITING - exit on HLT instruction |
2413 | * IA32_VMX_MWAIT_EXITING - exit on MWAIT instruction |
2414 | * IA32_VMX_UNCONDITIONAL_IO_EXITING - exit on I/O instructions |
2415 | * IA32_VMX_USE_MSR_BITMAPS - exit on various MSR accesses |
2416 | * IA32_VMX_CR8_LOAD_EXITING - guest TPR access |
2417 | * IA32_VMX_CR8_STORE_EXITING - guest TPR access |
2418 | * IA32_VMX_USE_TPR_SHADOW - guest TPR access (shadow) |
2419 | * IA32_VMX_MONITOR_EXITING - exit on MONITOR instruction |
2420 | * |
2421 | * If we have EPT, we must be able to clear the following |
2422 | * IA32_VMX_CR3_LOAD_EXITING - don't care about guest CR3 accesses |
2423 | * IA32_VMX_CR3_STORE_EXITING - don't care about guest CR3 accesses |
2424 | */ |
2425 | want1 = IA32_VMX_HLT_EXITING(1ULL << 7) | |
2426 | IA32_VMX_MWAIT_EXITING(1ULL << 10) | |
2427 | IA32_VMX_UNCONDITIONAL_IO_EXITING(1ULL << 24) | |
2428 | IA32_VMX_USE_MSR_BITMAPS(1ULL << 28) | |
2429 | IA32_VMX_CR8_LOAD_EXITING(1ULL << 19) | |
2430 | IA32_VMX_CR8_STORE_EXITING(1ULL << 20) | |
2431 | IA32_VMX_MONITOR_EXITING(1ULL << 29) | |
2432 | IA32_VMX_USE_TPR_SHADOW(1ULL << 21); |
2433 | want0 = 0; |
2434 | |
2435 | if (vmm_softc->mode == VMM_MODE_EPT) { |
2436 | want1 |= IA32_VMX_ACTIVATE_SECONDARY_CONTROLS(1ULL << 31); |
2437 | want0 |= IA32_VMX_CR3_LOAD_EXITING(1ULL << 15) | |
2438 | IA32_VMX_CR3_STORE_EXITING(1ULL << 16); |
2439 | } |
2440 | |
2441 | if (vcpu->vc_vmx_basic & IA32_VMX_TRUE_CTLS_AVAIL(1ULL << 55)) { |
2442 | ctrl = IA32_VMX_TRUE_PROCBASED_CTLS0x48E; |
2443 | ctrlval = vcpu->vc_vmx_true_procbased_ctls; |
2444 | } else { |
2445 | ctrl = IA32_VMX_PROCBASED_CTLS0x482; |
2446 | ctrlval = vcpu->vc_vmx_procbased_ctls; |
2447 | } |
2448 | |
2449 | if (vcpu_vmx_compute_ctrl(ctrlval, ctrl, want1, want0, &procbased)) { |
2450 | DPRINTF("%s: error computing procbased controls\n", __func__); |
2451 | ret = EINVAL22; |
2452 | goto exit; |
2453 | } |
2454 | |
2455 | if (vmwrite(VMCS_PROCBASED_CTLS0x4002, procbased)) { |
2456 | DPRINTF("%s: error setting procbased controls\n", __func__); |
2457 | ret = EINVAL22; |
2458 | goto exit; |
2459 | } |
2460 | |
2461 | /* |
2462 | * Secondary Procbased ctrls |
2463 | * |
2464 | * We want to be able to set the following, if available: |
2465 | * IA32_VMX_ENABLE_VPID - use VPIDs where available |
2466 | * |
2467 | * If we have EPT, we must be able to set the following: |
2468 | * IA32_VMX_ENABLE_EPT - enable EPT |
2469 | * |
2470 | * If we have unrestricted guest capability, we must be able to set |
2471 | * the following: |
2472 | * IA32_VMX_UNRESTRICTED_GUEST - enable unrestricted guest (if caller |
2473 | * specified CR0_PG | CR0_PE in %cr0 in the 'vrs' parameter) |
2474 | */ |
2475 | want1 = 0; |
2476 | |
2477 | /* XXX checking for 2ndary controls can be combined here */ |
2478 | if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED_CTLS0x482, |
2479 | IA32_VMX_ACTIVATE_SECONDARY_CONTROLS(1ULL << 31), 1)) { |
2480 | if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS0x48B, |
2481 | IA32_VMX_ENABLE_VPID(1ULL << 5), 1)) { |
2482 | want1 |= IA32_VMX_ENABLE_VPID(1ULL << 5); |
2483 | vcpu->vc_vmx_vpid_enabled = 1; |
2484 | } |
2485 | } |
2486 | |
2487 | if (vmm_softc->mode == VMM_MODE_EPT) |
2488 | want1 |= IA32_VMX_ENABLE_EPT(1ULL << 1); |
2489 | |
2490 | if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED_CTLS0x482, |
2491 | IA32_VMX_ACTIVATE_SECONDARY_CONTROLS(1ULL << 31), 1)) { |
2492 | if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS0x48B, |
2493 | IA32_VMX_UNRESTRICTED_GUEST(1ULL << 7), 1)) { |
2494 | if ((cr0 & (CR0_PE0x00000001 | CR0_PG0x80000000)) == 0) { |
2495 | want1 |= IA32_VMX_UNRESTRICTED_GUEST(1ULL << 7); |
2496 | ug = 1; |
2497 | } |
2498 | } |
2499 | } |
2500 | |
2501 | want0 = ~want1; |
2502 | ctrlval = vcpu->vc_vmx_procbased2_ctls; |
2503 | ctrl = IA32_VMX_PROCBASED2_CTLS0x48B; |
2504 | |
2505 | if (vcpu_vmx_compute_ctrl(ctrlval, ctrl, want1, want0, &procbased2)) { |
2506 | DPRINTF("%s: error computing secondary procbased controls\n", |
2507 | __func__); |
2508 | ret = EINVAL22; |
2509 | goto exit; |
2510 | } |
2511 | |
2512 | if (vmwrite(VMCS_PROCBASED2_CTLS0x401E, procbased2)) { |
2513 | DPRINTF("%s: error setting secondary procbased controls\n", |
2514 | __func__); |
2515 | ret = EINVAL22; |
2516 | goto exit; |
2517 | } |
2518 | |
2519 | /* |
2520 | * Exit ctrls |
2521 | * |
2522 | * We must be able to set the following: |
2523 | * IA32_VMX_SAVE_DEBUG_CONTROLS |
2524 | * IA32_VMX_HOST_SPACE_ADDRESS_SIZE - exit to long mode |
2525 | * IA32_VMX_ACKNOWLEDGE_INTERRUPT_ON_EXIT - ack interrupt on exit |
2526 | */ |
2527 | want1 = IA32_VMX_HOST_SPACE_ADDRESS_SIZE(1ULL << 9) | |
2528 | IA32_VMX_ACKNOWLEDGE_INTERRUPT_ON_EXIT(1ULL << 15) | |
2529 | IA32_VMX_SAVE_DEBUG_CONTROLS(1ULL << 2); |
2530 | want0 = 0; |
2531 | |
2532 | if (vcpu->vc_vmx_basic & IA32_VMX_TRUE_CTLS_AVAIL(1ULL << 55)) { |
2533 | ctrl = IA32_VMX_TRUE_EXIT_CTLS0x48F; |
2534 | ctrlval = vcpu->vc_vmx_true_exit_ctls; |
2535 | } else { |
2536 | ctrl = IA32_VMX_EXIT_CTLS0x483; |
2537 | ctrlval = vcpu->vc_vmx_exit_ctls; |
2538 | } |
2539 | |
2540 | if (rcr4() & CR4_CET0x00800000) |
2541 | want1 |= IA32_VMX_LOAD_HOST_CET_STATE(1ULL << 28); |
2542 | else |
2543 | want0 |= IA32_VMX_LOAD_HOST_CET_STATE(1ULL << 28); |
2544 | |
2545 | if (vcpu_vmx_compute_ctrl(ctrlval, ctrl, want1, want0, &exit)) { |
2546 | DPRINTF("%s: error computing exit controls\n", __func__); |
2547 | ret = EINVAL22; |
2548 | goto exit; |
2549 | } |
2550 | |
2551 | if (vmwrite(VMCS_EXIT_CTLS0x400C, exit)) { |
2552 | DPRINTF("%s: error setting exit controls\n", __func__); |
2553 | ret = EINVAL22; |
2554 | goto exit; |
2555 | } |
2556 | |
2557 | /* |
2558 | * Entry ctrls |
2559 | * |
2560 | * We must be able to set the following: |
2561 | * IA32_VMX_IA32E_MODE_GUEST (if no unrestricted guest) |
2562 | * IA32_VMX_LOAD_DEBUG_CONTROLS |
2563 | * We must be able to clear the following: |
2564 | * IA32_VMX_ENTRY_TO_SMM - enter to SMM |
2565 | * IA32_VMX_DEACTIVATE_DUAL_MONITOR_TREATMENT |
2566 | * IA32_VMX_LOAD_IA32_PERF_GLOBAL_CTRL_ON_ENTRY |
2567 | */ |
2568 | want1 = IA32_VMX_LOAD_DEBUG_CONTROLS(1ULL << 2); |
2569 | if (vrs->vrs_msrs[VCPU_REGS_EFER0] & EFER_LMA0x00000400) |
2570 | want1 |= IA32_VMX_IA32E_MODE_GUEST(1ULL << 9); |
2571 | |
2572 | want0 = IA32_VMX_ENTRY_TO_SMM(1ULL << 10) | |
2573 | IA32_VMX_DEACTIVATE_DUAL_MONITOR_TREATMENT(1ULL << 11) | |
2574 | IA32_VMX_LOAD_IA32_PERF_GLOBAL_CTRL_ON_ENTRY(1ULL << 13); |
2575 | |
2576 | if (vcpu->vc_vmx_basic & IA32_VMX_TRUE_CTLS_AVAIL(1ULL << 55)) { |
2577 | ctrl = IA32_VMX_TRUE_ENTRY_CTLS0x490; |
2578 | ctrlval = vcpu->vc_vmx_true_entry_ctls; |
2579 | } else { |
2580 | ctrl = IA32_VMX_ENTRY_CTLS0x484; |
2581 | ctrlval = vcpu->vc_vmx_entry_ctls; |
2582 | } |
2583 | |
2584 | if (rcr4() & CR4_CET0x00800000) |
2585 | want1 |= IA32_VMX_LOAD_GUEST_CET_STATE(1ULL << 20); |
2586 | else |
2587 | want0 |= IA32_VMX_LOAD_GUEST_CET_STATE(1ULL << 20); |
2588 | |
2589 | if (vcpu_vmx_compute_ctrl(ctrlval, ctrl, want1, want0, &entry)) { |
2590 | ret = EINVAL22; |
2591 | goto exit; |
2592 | } |
2593 | |
2594 | if (vmwrite(VMCS_ENTRY_CTLS0x4012, entry)) { |
2595 | ret = EINVAL22; |
2596 | goto exit; |
2597 | } |
2598 | |
2599 | if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED_CTLS0x482, |
2600 | IA32_VMX_ACTIVATE_SECONDARY_CONTROLS(1ULL << 31), 1)) { |
2601 | if (vcpu_vmx_check_cap(vcpu, IA32_VMX_PROCBASED2_CTLS0x48B, |
2602 | IA32_VMX_ENABLE_VPID(1ULL << 5), 1)) { |
2603 | |
2604 | /* We may sleep during allocation, so reload VMCS. */ |
2605 | vcpu->vc_last_pcpu = curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;}); |
2606 | ret = vmm_alloc_vpid(&vpid); |
2607 | if (vcpu_reload_vmcs_vmx(vcpu)) { |
2608 | printf("%s: failed to reload vmcs\n", __func__); |
2609 | ret = EINVAL22; |
2610 | goto exit; |
2611 | } |
2612 | if (ret) { |
2613 | DPRINTF("%s: could not allocate VPID\n", |
2614 | __func__); |
2615 | ret = EINVAL22; |
2616 | goto exit; |
2617 | } |
2618 | |
2619 | if (vmwrite(VMCS_GUEST_VPID0x0000, vpid)) { |
2620 | DPRINTF("%s: error setting guest VPID\n", |
2621 | __func__); |
2622 | ret = EINVAL22; |
2623 | goto exit; |
2624 | } |
2625 | |
2626 | vcpu->vc_vpid = vpid; |
2627 | } |
2628 | } |
2629 | |
2630 | /* |
2631 | * Determine which bits in CR0 have to be set to a fixed |
2632 | * value as per Intel SDM A.7. |
2633 | * CR0 bits in the vrs parameter must match these. |
2634 | */ |
2635 | want1 = (curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_vmx.vmx_cr0_fixed0) & |
2636 | (curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_vmx.vmx_cr0_fixed1); |
2637 | want0 = ~(curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_vmx.vmx_cr0_fixed0) & |
2638 | ~(curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_vmx.vmx_cr0_fixed1); |
2639 | |
2640 | /* |
2641 | * CR0_FIXED0 and CR0_FIXED1 may report the CR0_PG and CR0_PE bits as |
2642 | * fixed to 1 even if the CPU supports the unrestricted guest |
2643 | * feature. Update want1 and want0 accordingly to allow |
2644 | * any value for CR0_PG and CR0_PE in vrs->vrs_crs[VCPU_REGS_CR0] if |
2645 | * the CPU has the unrestricted guest capability. |
2646 | */ |
2647 | if (ug) { |
2648 | want1 &= ~(CR0_PG0x80000000 | CR0_PE0x00000001); |
2649 | want0 &= ~(CR0_PG0x80000000 | CR0_PE0x00000001); |
2650 | } |
2651 | |
2652 | /* |
2653 | * VMX may require some bits to be set that userland should not have |
2654 | * to care about. Set those here. |
2655 | */ |
2656 | if (want1 & CR0_NE0x00000020) |
2657 | cr0 |= CR0_NE0x00000020; |
2658 | |
2659 | if ((cr0 & want1) != want1) { |
2660 | ret = EINVAL22; |
2661 | goto exit; |
2662 | } |
2663 | |
2664 | if ((~cr0 & want0) != want0) { |
2665 | ret = EINVAL22; |
2666 | goto exit; |
2667 | } |
2668 | |
2669 | vcpu->vc_vmx_cr0_fixed1 = want1; |
2670 | vcpu->vc_vmx_cr0_fixed0 = want0; |
2671 | /* |
2672 | * Determine which bits in CR4 have to be set to a fixed |
2673 | * value as per Intel SDM A.8. |
2674 | * CR4 bits in the vrs parameter must match these, except |
2675 | * CR4_VMXE - we add that here since it must always be set. |
2676 | */ |
2677 | want1 = (curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed0) & |
2678 | (curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed1); |
2679 | want0 = ~(curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed0) & |
2680 | ~(curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed1); |
2681 | |
2682 | cr4 = vrs->vrs_crs[VCPU_REGS_CR43] | CR4_VMXE0x00002000; |
2683 | |
2684 | if ((cr4 & want1) != want1) { |
2685 | ret = EINVAL22; |
2686 | goto exit; |
2687 | } |
2688 | |
2689 | if ((~cr4 & want0) != want0) { |
2690 | ret = EINVAL22; |
2691 | goto exit; |
2692 | } |
2693 | |
2694 | cr3 = vrs->vrs_crs[VCPU_REGS_CR32]; |
2695 | |
2696 | /* Restore PDPTEs if 32-bit PAE paging is being used */ |
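/*
 * Background note (per the Intel SDM): with EPT and PAE paging outside
 * long mode, VM entry loads the guest PDPTE registers from these VMCS
 * fields rather than from memory, so the values supplied in 'vrs' are
 * written back here.
 */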
2697 | if (cr3 && (cr4 & CR4_PAE0x00000020) && |
2698 | !(vrs->vrs_msrs[VCPU_REGS_EFER0] & EFER_LMA0x00000400)) { |
2699 | if (vmwrite(VMCS_GUEST_PDPTE00x280A, |
2700 | vrs->vrs_crs[VCPU_REGS_PDPTE06])) { |
2701 | ret = EINVAL22; |
2702 | goto exit; |
2703 | } |
2704 | |
2705 | if (vmwrite(VMCS_GUEST_PDPTE10x280C, |
2706 | vrs->vrs_crs[VCPU_REGS_PDPTE17])) { |
2707 | ret = EINVAL22; |
2708 | goto exit; |
2709 | } |
2710 | |
2711 | if (vmwrite(VMCS_GUEST_PDPTE20x280E, |
2712 | vrs->vrs_crs[VCPU_REGS_PDPTE28])) { |
2713 | ret = EINVAL22; |
2714 | goto exit; |
2715 | } |
2716 | |
2717 | if (vmwrite(VMCS_GUEST_PDPTE30x2810, |
2718 | vrs->vrs_crs[VCPU_REGS_PDPTE39])) { |
2719 | ret = EINVAL22; |
2720 | goto exit; |
2721 | } |
2722 | } |
2723 | |
2724 | vrs->vrs_crs[VCPU_REGS_CR00] = cr0; |
2725 | vrs->vrs_crs[VCPU_REGS_CR43] = cr4; |
2726 | |
2727 | /* |
2728 | * Select host MSRs to be loaded on exit |
2729 | */ |
2730 | msr_store = (struct vmx_msr_store *)vcpu->vc_vmx_msr_exit_load_va; |
2731 | msr_store[0].vms_index = MSR_EFER0xc0000080; |
2732 | msr_store[0].vms_data = rdmsr(MSR_EFER0xc0000080); |
2733 | msr_store[1].vms_index = MSR_STAR0xc0000081; |
2734 | msr_store[1].vms_data = rdmsr(MSR_STAR0xc0000081); |
2735 | msr_store[2].vms_index = MSR_LSTAR0xc0000082; |
2736 | msr_store[2].vms_data = rdmsr(MSR_LSTAR0xc0000082); |
2737 | msr_store[3].vms_index = MSR_CSTAR0xc0000083; |
2738 | msr_store[3].vms_data = rdmsr(MSR_CSTAR0xc0000083); |
2739 | msr_store[4].vms_index = MSR_SFMASK0xc0000084; |
2740 | msr_store[4].vms_data = rdmsr(MSR_SFMASK0xc0000084); |
2741 | msr_store[5].vms_index = MSR_KERNELGSBASE0xc0000102; |
2742 | msr_store[5].vms_data = rdmsr(MSR_KERNELGSBASE0xc0000102); |
2743 | msr_store[6].vms_index = MSR_MISC_ENABLE0x1a0; |
2744 | msr_store[6].vms_data = rdmsr(MSR_MISC_ENABLE0x1a0); |
2745 | |
2746 | /* |
2747 | * Select guest MSRs to be loaded on entry / saved on exit |
2748 | */ |
2749 | msr_store = (struct vmx_msr_store *)vcpu->vc_vmx_msr_exit_save_va; |
2750 | |
2751 | msr_store[VCPU_REGS_EFER0].vms_index = MSR_EFER0xc0000080; |
2752 | msr_store[VCPU_REGS_STAR1].vms_index = MSR_STAR0xc0000081; |
2753 | msr_store[VCPU_REGS_LSTAR2].vms_index = MSR_LSTAR0xc0000082; |
2754 | msr_store[VCPU_REGS_CSTAR3].vms_index = MSR_CSTAR0xc0000083; |
2755 | msr_store[VCPU_REGS_SFMASK4].vms_index = MSR_SFMASK0xc0000084; |
2756 | msr_store[VCPU_REGS_KGSBASE5].vms_index = MSR_KERNELGSBASE0xc0000102; |
2757 | msr_store[VCPU_REGS_MISC_ENABLE6].vms_index = MSR_MISC_ENABLE0x1a0; |
2758 | |
2759 | /* |
2760 | * Initialize MSR_MISC_ENABLE here since it cannot be read and populated
2761 | * from vmd, and some of its content is based on the host.
2762 | */ |
2763 | msr_store[VCPU_REGS_MISC_ENABLE6].vms_data = rdmsr(MSR_MISC_ENABLE0x1a0); |
2764 | msr_store[VCPU_REGS_MISC_ENABLE6].vms_data &= |
2765 | ~(MISC_ENABLE_TCC(1 << 3) | MISC_ENABLE_PERF_MON_AVAILABLE(1 << 7) | |
2766 | MISC_ENABLE_EIST_ENABLED(1 << 16) | MISC_ENABLE_ENABLE_MONITOR_FSM(1 << 18) | |
2767 | MISC_ENABLE_xTPR_MESSAGE_DISABLE(1 << 23)); |
2768 | msr_store[VCPU_REGS_MISC_ENABLE6].vms_data |= |
2769 | MISC_ENABLE_BTS_UNAVAILABLE(1 << 11) | MISC_ENABLE_PEBS_UNAVAILABLE(1 << 12); |
2770 | |
2771 | /* |
2772 | * Currently we use the same count for entry/exit MSR loads/stores,
2773 | * but this is not an architectural requirement. |
2774 | */ |
2775 | if (vmwrite(VMCS_EXIT_MSR_STORE_COUNT0x400E, VMX_NUM_MSR_STORE7)) { |
2776 | DPRINTF("%s: error setting guest MSR exit store count\n", |
2777 | __func__); |
2778 | ret = EINVAL22; |
2779 | goto exit; |
2780 | } |
2781 | |
2782 | if (vmwrite(VMCS_EXIT_MSR_LOAD_COUNT0x4010, VMX_NUM_MSR_STORE7)) { |
2783 | DPRINTF("%s: error setting guest MSR exit load count\n", |
2784 | __func__); |
2785 | ret = EINVAL22; |
2786 | goto exit; |
2787 | } |
2788 | |
2789 | if (vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT0x4014, VMX_NUM_MSR_STORE7)) { |
2790 | DPRINTF("%s: error setting guest MSR entry load count\n", |
2791 | __func__); |
2792 | ret = EINVAL22; |
2793 | goto exit; |
2794 | } |
2795 | |
2796 | if (vmwrite(VMCS_EXIT_STORE_MSR_ADDRESS0x2006, |
2797 | vcpu->vc_vmx_msr_exit_save_pa)) { |
2798 | DPRINTF("%s: error setting guest MSR exit store address\n", |
2799 | __func__); |
2800 | ret = EINVAL22; |
2801 | goto exit; |
2802 | } |
2803 | |
2804 | if (vmwrite(VMCS_EXIT_LOAD_MSR_ADDRESS0x2008, |
2805 | vcpu->vc_vmx_msr_exit_load_pa)) { |
2806 | DPRINTF("%s: error setting guest MSR exit load address\n", |
2807 | __func__); |
2808 | ret = EINVAL22; |
2809 | goto exit; |
2810 | } |
2811 | |
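/*
 * Note: the VM-entry MSR load area deliberately points at the same
 * region as the VM-exit MSR store area above, so guest MSR values
 * saved on exit are reloaded unchanged on the next entry.
 */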
2812 | if (vmwrite(VMCS_ENTRY_LOAD_MSR_ADDRESS0x200A, |
2813 | vcpu->vc_vmx_msr_exit_save_pa)) { |
2814 | DPRINTF("%s: error setting guest MSR entry load address\n", |
2815 | __func__); |
2816 | ret = EINVAL22; |
2817 | goto exit; |
2818 | } |
2819 | |
2820 | if (vmwrite(VMCS_MSR_BITMAP_ADDRESS0x2004, |
2821 | vcpu->vc_msr_bitmap_pa)) { |
2822 | DPRINTF("%s: error setting guest MSR bitmap address\n", |
2823 | __func__); |
2824 | ret = EINVAL22; |
2825 | goto exit; |
2826 | } |
2827 | |
2828 | if (vmwrite(VMCS_CR4_MASK0x6002, CR4_VMXE0x00002000)) { |
2829 | DPRINTF("%s: error setting guest CR4 mask\n", __func__); |
2830 | ret = EINVAL22; |
2831 | goto exit; |
2832 | } |
2833 | |
2834 | if (vmwrite(VMCS_CR0_MASK0x6000, CR0_NE0x00000020)) { |
2835 | DPRINTF("%s: error setting guest CR0 mask\n", __func__); |
2836 | ret = EINVAL22; |
2837 | goto exit; |
2838 | } |
2839 | |
2840 | /* |
2841 | * Set up the VMCS for the register state we want during VCPU start. |
2842 | * This matches what the CPU state would be after a bootloader |
2843 | * transition to 'start'. |
2844 | */ |
2845 | ret = vcpu_writeregs_vmx(vcpu, VM_RWREGS_ALL(0x1 | 0x2 | 0x4 | 0x8 | 0x10), 0, vrs); |
2846 | |
2847 | /* |
2848 | * Set up the MSR bitmap |
2849 | */ |
2850 | memset((uint8_t *)vcpu->vc_msr_bitmap_va, 0xFF, PAGE_SIZE)__builtin_memset(((uint8_t *)vcpu->vc_msr_bitmap_va), (0xFF ), ((1 << 12))); |
2851 | vmx_setmsrbrw(vcpu, MSR_IA32_FEATURE_CONTROL0x03a); |
2852 | vmx_setmsrbrw(vcpu, MSR_SYSENTER_CS0x174); |
2853 | vmx_setmsrbrw(vcpu, MSR_SYSENTER_ESP0x175); |
2854 | vmx_setmsrbrw(vcpu, MSR_SYSENTER_EIP0x176); |
2855 | vmx_setmsrbrw(vcpu, MSR_EFER0xc0000080); |
2856 | vmx_setmsrbrw(vcpu, MSR_STAR0xc0000081); |
2857 | vmx_setmsrbrw(vcpu, MSR_LSTAR0xc0000082); |
2858 | vmx_setmsrbrw(vcpu, MSR_CSTAR0xc0000083); |
2859 | vmx_setmsrbrw(vcpu, MSR_SFMASK0xc0000084); |
2860 | vmx_setmsrbrw(vcpu, MSR_FSBASE0xc0000100); |
2861 | vmx_setmsrbrw(vcpu, MSR_GSBASE0xc0000101); |
2862 | vmx_setmsrbrw(vcpu, MSR_KERNELGSBASE0xc0000102); |
2863 | |
2864 | vmx_setmsrbr(vcpu, MSR_MISC_ENABLE0x1a0); |
2865 | vmx_setmsrbr(vcpu, MSR_TSC0x010); |
2866 | |
2867 | /* If host supports CET, pass through access to the guest. */ |
2868 | if (rcr4() & CR4_CET0x00800000) |
2869 | vmx_setmsrbrw(vcpu, MSR_S_CET0x6a2); |
2870 | |
2871 | /* XXX CR0 shadow */ |
2872 | /* XXX CR4 shadow */ |
2873 | |
2874 | /* xcr0 power on default sets bit 0 (x87 state) */ |
2875 | vcpu->vc_gueststate.vg_xcr0 = XFEATURE_X870x00000001 & xsave_mask; |
2876 | |
2877 | /* XXX PAT shadow */ |
2878 | vcpu->vc_shadow_pat = rdmsr(MSR_CR_PAT0x277); |
2879 | |
2880 | /* Flush the VMCS */ |
2881 | if (vmclear(&vcpu->vc_control_pa)) { |
2882 | DPRINTF("%s: vmclear failed\n", __func__); |
2883 | ret = EINVAL22; |
2884 | } |
2885 | atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, VMCS_CLEARED)_atomic_swap_uint((&vcpu->vc_vmx_vmcs_state), (0)); |
2886 | |
2887 | exit: |
2888 | return (ret); |
2889 | } |
2890 | |
2891 | /* |
2892 | * vcpu_init_vmx |
2893 | * |
2894 | * Intel VMX specific VCPU initialization routine. |
2895 | * |
2896 | * This function allocates various per-VCPU memory regions, sets up initial |
2897 | * VCPU VMCS controls, and sets initial register values. |
2898 | * |
2899 | * Parameters: |
2900 | * vcpu: the VCPU structure being initialized |
2901 | * |
2902 | * Return values: |
2903 | * 0: the VCPU was initialized successfully |
2904 | * ENOMEM: insufficient resources |
2905 | * EINVAL: an error occurred during VCPU initialization |
2906 | */ |
2907 | int |
2908 | vcpu_init_vmx(struct vcpu *vcpu) |
2909 | { |
2910 | struct vmcs *vmcs; |
2911 | uint64_t msr, eptp; |
2912 | uint32_t cr0, cr4; |
2913 | int ret = 0; |
2914 | |
2915 | /* Allocate VMCS VA */ |
2916 | vcpu->vc_control_va = (vaddr_t)km_alloc(PAGE_SIZE(1 << 12), &kv_page, &kp_zero, |
2917 | &kd_waitok); |
2918 | vcpu->vc_vmx_vmcs_state = VMCS_CLEARED0; |
2919 | |
2920 | if (!vcpu->vc_control_va) |
2921 | return (ENOMEM12); |
2922 | |
2923 | /* Compute VMCS PA */ |
2924 | if (!pmap_extract(pmap_kernel()(&kernel_pmap_store), vcpu->vc_control_va, |
2925 | (paddr_t *)&vcpu->vc_control_pa)) { |
2926 | ret = ENOMEM12; |
2927 | goto exit; |
2928 | } |
2929 | |
2930 | /* Allocate MSR bitmap VA */ |
2931 | vcpu->vc_msr_bitmap_va = (vaddr_t)km_alloc(PAGE_SIZE(1 << 12), &kv_page, &kp_zero, |
2932 | &kd_waitok); |
2933 | |
2934 | if (!vcpu->vc_msr_bitmap_va) { |
2935 | ret = ENOMEM12; |
2936 | goto exit; |
2937 | } |
2938 | |
2939 | /* Compute MSR bitmap PA */ |
2940 | if (!pmap_extract(pmap_kernel()(&kernel_pmap_store), vcpu->vc_msr_bitmap_va, |
2941 | (paddr_t *)&vcpu->vc_msr_bitmap_pa)) { |
2942 | ret = ENOMEM12; |
2943 | goto exit; |
2944 | } |
2945 | |
2946 | /* Allocate MSR exit load area VA */ |
2947 | vcpu->vc_vmx_msr_exit_load_va = (vaddr_t)km_alloc(PAGE_SIZE(1 << 12), &kv_page, |
2948 | &kp_zero, &kd_waitok); |
2949 | |
2950 | if (!vcpu->vc_vmx_msr_exit_load_va) { |
2951 | ret = ENOMEM12; |
2952 | goto exit; |
2953 | } |
2954 | |
2955 | /* Compute MSR exit load area PA */ |
2956 | if (!pmap_extract(pmap_kernel()(&kernel_pmap_store), vcpu->vc_vmx_msr_exit_load_va, |
2957 | &vcpu->vc_vmx_msr_exit_load_pa)) { |
2958 | ret = ENOMEM12; |
2959 | goto exit; |
2960 | } |
2961 | |
2962 | /* Allocate MSR exit save area VA */ |
2963 | vcpu->vc_vmx_msr_exit_save_va = (vaddr_t)km_alloc(PAGE_SIZE(1 << 12), &kv_page, |
2964 | &kp_zero, &kd_waitok); |
2965 | |
2966 | if (!vcpu->vc_vmx_msr_exit_save_va) { |
2967 | ret = ENOMEM12; |
2968 | goto exit; |
2969 | } |
2970 | |
2971 | /* Compute MSR exit save area PA */ |
2972 | if (!pmap_extract(pmap_kernel()(&kernel_pmap_store), vcpu->vc_vmx_msr_exit_save_va, |
2973 | &vcpu->vc_vmx_msr_exit_save_pa)) { |
2974 | ret = ENOMEM12; |
2975 | goto exit; |
2976 | } |
2977 | |
2978 | /* Allocate MSR entry load area VA */ |
2979 | vcpu->vc_vmx_msr_entry_load_va = (vaddr_t)km_alloc(PAGE_SIZE(1 << 12), &kv_page, |
2980 | &kp_zero, &kd_waitok); |
2981 | |
2982 | if (!vcpu->vc_vmx_msr_entry_load_va) { |
2983 | ret = ENOMEM12; |
2984 | goto exit; |
2985 | } |
2986 | |
2987 | /* Compute MSR entry load area PA */ |
2988 | if (!pmap_extract(pmap_kernel()(&kernel_pmap_store), vcpu->vc_vmx_msr_entry_load_va, |
2989 | &vcpu->vc_vmx_msr_entry_load_pa)) { |
2990 | ret = ENOMEM12; |
2991 | goto exit; |
2992 | } |
2993 | |
2994 | vmcs = (struct vmcs *)vcpu->vc_control_va; |
2995 | vmcs->vmcs_revision = curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_vmm_cap.vcc_vmx.vmx_vmxon_revision; |
2996 | |
2997 | /* |
2998 | * Load the VMCS onto this PCPU so we can write registers |
2999 | */ |
3000 | if (vmptrld(&vcpu->vc_control_pa)) { |
3001 | ret = EINVAL22; |
3002 | goto exit; |
3003 | } |
3004 | |
3005 | /* Configure EPT Pointer */ |
3006 | eptp = vcpu->vc_parent->vm_map->pmap->pm_pdirpa; |
3007 | msr = rdmsr(IA32_VMX_EPT_VPID_CAP0x48C); |
3008 | if (msr & IA32_EPT_VPID_CAP_PAGE_WALK_4(1ULL << 6)) { |
3009 | /* Page walk length 4 supported */ |
3010 | eptp |= ((IA32_EPT_PAGE_WALK_LENGTH0x4 - 1) << 3); |
3011 | } else { |
3012 | DPRINTF("EPT page walk length 4 not supported\n"); |
3013 | ret = EINVAL22; |
3014 | goto exit; |
3015 | } |
3016 | |
3017 | if (msr & IA32_EPT_VPID_CAP_WB(1ULL << 14)) { |
3018 | /* WB cache type supported */ |
3019 | eptp |= IA32_EPT_PAGING_CACHE_TYPE_WB0x6; |
3020 | } else |
3021 | DPRINTF("%s: no WB cache type available, guest VM will run " |
3022 | "uncached\n", __func__); |
3023 | |
3024 | DPRINTF("Guest EPTP = 0x%llx\n", eptp); |
3025 | if (vmwrite(VMCS_GUEST_IA32_EPTP0x201A, eptp)) { |
3026 | DPRINTF("%s: error setting guest EPTP\n", __func__); |
3027 | ret = EINVAL22; |
3028 | goto exit; |
3029 | } |
3030 | |
3031 | vcpu->vc_parent->vm_map->pmap->eptp = eptp; |
3032 | |
3033 | /* Host CR0 */ |
3034 | cr0 = rcr0() & ~CR0_TS0x00000008; |
3035 | if (vmwrite(VMCS_HOST_IA32_CR00x6C00, cr0)) { |
3036 | DPRINTF("%s: error writing host CR0\n", __func__); |
3037 | ret = EINVAL22; |
3038 | goto exit; |
3039 | } |
3040 | |
3041 | /* Host CR4 */ |
3042 | cr4 = rcr4(); |
3043 | if (vmwrite(VMCS_HOST_IA32_CR40x6C04, cr4)) { |
3044 | DPRINTF("%s: error writing host CR4\n", __func__); |
3045 | ret = EINVAL22; |
3046 | goto exit; |
3047 | } |
3048 | |
3049 | /* Host Segment Selectors */ |
3050 | if (vmwrite(VMCS_HOST_IA32_CS_SEL0x0C02, GSEL(GCODE_SEL, SEL_KPL)(((1) << 3) | 0))) { |
3051 | DPRINTF("%s: error writing host CS selector\n", __func__); |
3052 | ret = EINVAL22; |
3053 | goto exit; |
3054 | } |
3055 | |
3056 | if (vmwrite(VMCS_HOST_IA32_DS_SEL0x0C06, GSEL(GDATA_SEL, SEL_KPL)(((2) << 3) | 0))) { |
3057 | DPRINTF("%s: error writing host DS selector\n", __func__); |
3058 | ret = EINVAL22; |
3059 | goto exit; |
3060 | } |
3061 | |
3062 | if (vmwrite(VMCS_HOST_IA32_ES_SEL0x0C00, GSEL(GDATA_SEL, SEL_KPL)(((2) << 3) | 0))) { |
3063 | DPRINTF("%s: error writing host ES selector\n", __func__); |
3064 | ret = EINVAL22; |
3065 | goto exit; |
3066 | } |
3067 | |
3068 | if (vmwrite(VMCS_HOST_IA32_FS_SEL0x0C08, GSEL(GDATA_SEL, SEL_KPL)(((2) << 3) | 0))) { |
3069 | DPRINTF("%s: error writing host FS selector\n", __func__); |
3070 | ret = EINVAL22; |
3071 | goto exit; |
3072 | } |
3073 | |
3074 | if (vmwrite(VMCS_HOST_IA32_GS_SEL0x0C0A, GSEL(GDATA_SEL, SEL_KPL)(((2) << 3) | 0))) { |
3075 | DPRINTF("%s: error writing host GS selector\n", __func__); |
3076 | ret = EINVAL22; |
3077 | goto exit; |
3078 | } |
3079 | |
3080 | if (vmwrite(VMCS_HOST_IA32_SS_SEL0x0C04, GSEL(GDATA_SEL, SEL_KPL)(((2) << 3) | 0))) { |
3081 | DPRINTF("%s: error writing host SS selector\n", __func__); |
3082 | ret = EINVAL22; |
3083 | goto exit; |
3084 | } |
3085 | |
3086 | if (vmwrite(VMCS_HOST_IA32_TR_SEL0x0C0C, GSYSSEL(GPROC0_SEL, SEL_KPL)((((0) << 4) + (6 << 3)) | 0))) { |
3087 | DPRINTF("%s: error writing host TR selector\n", __func__); |
3088 | ret = EINVAL22; |
3089 | goto exit; |
3090 | } |
3091 | |
3092 | /* Host IDTR base */ |
3093 | if (vmwrite(VMCS_HOST_IA32_IDTR_BASE0x6C0E, idt_vaddr)) { |
3094 | DPRINTF("%s: error writing host IDTR base\n", __func__); |
3095 | ret = EINVAL22; |
3096 | goto exit; |
3097 | } |
3098 | |
3099 | /* VMCS link */ |
3100 | if (vmwrite(VMCS_LINK_POINTER0x2800, VMX_VMCS_PA_CLEAR0xFFFFFFFFFFFFFFFFUL)) { |
3101 | DPRINTF("%s: error writing VMCS link pointer\n", __func__); |
3102 | ret = EINVAL22; |
3103 | goto exit; |
3104 | } |
3105 | |
3106 | /* Flush the initial VMCS */ |
3107 | if (vmclear(&vcpu->vc_control_pa)) { |
3108 | DPRINTF("%s: vmclear failed\n", __func__); |
3109 | ret = EINVAL22; |
3110 | } |
3111 | |
3112 | exit: |
3113 | if (ret) |
3114 | vcpu_deinit_vmx(vcpu); |
3115 | |
3116 | return (ret); |
3117 | } |
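The EPTP written above packs the page-walk length (encoded as length minus one in bits 5:3) and the paging-structure memory type (bits 2:0) into the low bits of the PML4 physical address. A standalone, illustrative sketch of that bit packing follows; it is not part of vmm_machdep.c, the function name and sample address are invented, and the field layout is as described in the Intel SDM:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only: compose an EPT pointer from a page-aligned
 * PML4 physical address, a page-walk length and a cache type (6 = WB,
 * matching IA32_EPT_PAGING_CACHE_TYPE_WB in the listing above).
 */
static uint64_t
eptp_compose(uint64_t pml4_pa, int walk_levels, int cache_type)
{
        uint64_t eptp = pml4_pa & ~0xfffULL;    /* must be page aligned */

        eptp |= (uint64_t)(walk_levels - 1) << 3;
        eptp |= (uint64_t)cache_type;

        return (eptp);
}

int
main(void)
{
        /* hypothetical PML4 physical address */
        printf("eptp = 0x%llx\n",
            (unsigned long long)eptp_compose(0x123456000ULL, 4, 6));
        return (0);
}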
3118 | |
3119 | /* |
3120 | * vcpu_reset_regs |
3121 | * |
3122 | * Resets a vcpu's registers to the provided state |
3123 | * |
3124 | * Parameters: |
3125 | * vcpu: the vcpu whose registers shall be reset |
3126 | * vrs: the desired register state |
3127 | * |
3128 | * Return values: |
3129 | * 0: the vcpu's registers were successfully reset |
3130 | * !0: the vcpu's registers could not be reset (see arch-specific reset |
3131 | * function for various values that can be returned here) |
3132 | */ |
3133 | int |
3134 | vcpu_reset_regs(struct vcpu *vcpu, struct vcpu_reg_state *vrs) |
3135 | { |
3136 | int ret; |
3137 | |
3138 | if (vmm_softc->mode == VMM_MODE_EPT) |
3139 | ret = vcpu_reset_regs_vmx(vcpu, vrs); |
3140 | else if (vmm_softc->mode == VMM_MODE_RVI) |
3141 | ret = vcpu_reset_regs_svm(vcpu, vrs); |
3142 | else |
3143 | panic("%s: unknown vmm mode: %d", __func__, vmm_softc->mode); |
3144 | |
3145 | return (ret); |
3146 | } |
3147 | |
3148 | /* |
3149 | * vcpu_init_svm |
3150 | * |
3151 | * AMD SVM specific VCPU initialization routine. |
3152 | * |
3153 | * This function allocates various per-VCPU memory regions, sets up initial |
3154 | * VCPU VMCB controls, and sets initial register values. |
3155 | * |
3156 | * Parameters: |
3157 | * vcpu: the VCPU structure being initialized |
3158 | * |
3159 | * Return values: |
3160 | * 0: the VCPU was initialized successfully |
3161 | * ENOMEM: insufficient resources |
3162 | * EINVAL: an error occurred during VCPU initialization |
3163 | */ |
3164 | int |
3165 | vcpu_init_svm(struct vcpu *vcpu) |
3166 | { |
3167 | int ret = 0; |
3168 | |
3169 | /* Allocate VMCB VA */ |
3170 | vcpu->vc_control_va = (vaddr_t)km_alloc(PAGE_SIZE(1 << 12), &kv_page, &kp_zero, |
3171 | &kd_waitok); |
3172 | |
3173 | if (!vcpu->vc_control_va) |
3174 | return (ENOMEM12); |
3175 | |
3176 | /* Compute VMCB PA */ |
3177 | if (!pmap_extract(pmap_kernel()(&kernel_pmap_store), vcpu->vc_control_va, |
3178 | (paddr_t *)&vcpu->vc_control_pa)) { |
3179 | ret = ENOMEM12; |
3180 | goto exit; |
3181 | } |
3182 | |
3183 | DPRINTF("%s: VMCB va @ 0x%llx, pa @ 0x%llx\n", __func__, |
3184 | (uint64_t)vcpu->vc_control_va, |
3185 | (uint64_t)vcpu->vc_control_pa); |
3186 | |
3187 | |
3188 | /* Allocate MSR bitmap VA (2 pages) */ |
3189 | vcpu->vc_msr_bitmap_va = (vaddr_t)km_alloc(2 * PAGE_SIZE(1 << 12), &kv_any, |
3190 | &vmm_kp_contig, &kd_waitok); |
3191 | |
3192 | if (!vcpu->vc_msr_bitmap_va) { |
3193 | ret = ENOMEM12; |
3194 | goto exit; |
3195 | } |
3196 | |
3197 | /* Compute MSR bitmap PA */ |
3198 | if (!pmap_extract(pmap_kernel()(&kernel_pmap_store), vcpu->vc_msr_bitmap_va, |
3199 | (paddr_t *)&vcpu->vc_msr_bitmap_pa)) { |
3200 | ret = ENOMEM12; |
3201 | goto exit; |
3202 | } |
3203 | |
3204 | DPRINTF("%s: MSR bitmap va @ 0x%llx, pa @ 0x%llx\n", __func__, |
3205 | (uint64_t)vcpu->vc_msr_bitmap_va, |
3206 | (uint64_t)vcpu->vc_msr_bitmap_pa); |
3207 | |
3208 | /* Allocate host state area VA */ |
3209 | vcpu->vc_svm_hsa_va = (vaddr_t)km_alloc(PAGE_SIZE(1 << 12), &kv_page, |
3210 | &kp_zero, &kd_waitok); |
3211 | |
3212 | if (!vcpu->vc_svm_hsa_va) { |
3213 | ret = ENOMEM12; |
3214 | goto exit; |
3215 | } |
3216 | |
3217 | /* Compute host state area PA */ |
3218 | if (!pmap_extract(pmap_kernel()(&kernel_pmap_store), vcpu->vc_svm_hsa_va, |
3219 | &vcpu->vc_svm_hsa_pa)) { |
3220 | ret = ENOMEM12; |
3221 | goto exit; |
3222 | } |
3223 | |
3224 | DPRINTF("%s: HSA va @ 0x%llx, pa @ 0x%llx\n", __func__, |
3225 | (uint64_t)vcpu->vc_svm_hsa_va, |
3226 | (uint64_t)vcpu->vc_svm_hsa_pa); |
3227 | |
3228 | /* Allocate IOIO area VA (3 pages) */ |
3229 | vcpu->vc_svm_ioio_va = (vaddr_t)km_alloc(3 * PAGE_SIZE(1 << 12), &kv_any, |
3230 | &vmm_kp_contig, &kd_waitok); |
3231 | |
3232 | if (!vcpu->vc_svm_ioio_va) { |
3233 | ret = ENOMEM12; |
3234 | goto exit; |
3235 | } |
3236 | |
3237 | /* Compute IOIO area PA */ |
3238 | if (!pmap_extract(pmap_kernel()(&kernel_pmap_store), vcpu->vc_svm_ioio_va, |
3239 | &vcpu->vc_svm_ioio_pa)) { |
3240 | ret = ENOMEM12; |
3241 | goto exit; |
3242 | } |
3243 | |
3244 | DPRINTF("%s: IOIO va @ 0x%llx, pa @ 0x%llx\n", __func__, |
3245 | (uint64_t)vcpu->vc_svm_ioio_va, |
3246 | (uint64_t)vcpu->vc_svm_ioio_pa); |
3247 | |
3248 | exit: |
3249 | if (ret) |
3250 | vcpu_deinit_svm(vcpu); |
3251 | |
3252 | return (ret); |
3253 | } |
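vcpu_init_svm() sizes its allocations to the SVM permission bitmaps: two pages for the MSR permission map and three pages for the I/O permission map, which carries one intercept bit per I/O port. A minimal sketch of looking up a port's intercept bit in such a bitmap (illustrative only; the function and variable names are invented and no VMCB wiring is shown):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative sketch: test whether I/O port 'port' is intercepted in an
 * SVM-style I/O permission bitmap (one bit per port, 3 pages total).
 */
static int
iopm_port_intercepted(const uint8_t *iopm, uint16_t port)
{
        size_t byte = port / 8;
        uint8_t bit = 1U << (port % 8);

        return ((iopm[byte] & bit) != 0);
}

int
main(void)
{
        static uint8_t iopm[3 * 4096];

        iopm[0x3f8 / 8] |= 1U << (0x3f8 % 8);   /* intercept COM1 data port */
        printf("port 0x3f8: %d\n", iopm_port_intercepted(iopm, 0x3f8));
        printf("port 0x3f9: %d\n", iopm_port_intercepted(iopm, 0x3f9));
        return (0);
}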
3254 | |
3255 | /* |
3256 | * vcpu_init |
3257 | * |
3258 | * Calls the architecture-specific VCPU init routine |
3259 | */ |
3260 | int |
3261 | vcpu_init(struct vcpu *vcpu) |
3262 | { |
3263 | int ret = 0; |
3264 | |
3265 | vcpu->vc_virt_mode = vmm_softc->mode; |
3266 | vcpu->vc_state = VCPU_STATE_STOPPED; |
3267 | vcpu->vc_vpid = 0; |
3268 | vcpu->vc_pvclock_system_gpa = 0; |
3269 | vcpu->vc_last_pcpu = NULL((void *)0); |
3270 | |
3271 | rw_init(&vcpu->vc_lock, "vcpu")_rw_init_flags(&vcpu->vc_lock, "vcpu", 0, ((void *)0)); |
3272 | |
3273 | /* Shadow PAT MSR, starting with host's value. */ |
3274 | vcpu->vc_shadow_pat = rdmsr(MSR_CR_PAT0x277); |
3275 | |
3276 | if (vmm_softc->mode == VMM_MODE_EPT) |
3277 | ret = vcpu_init_vmx(vcpu); |
3278 | else if (vmm_softc->mode == VMM_MODE_RVI) |
3279 | ret = vcpu_init_svm(vcpu); |
3280 | else |
3281 | panic("%s: unknown vmm mode: %d", __func__, vmm_softc->mode); |
3282 | |
3283 | return (ret); |
3284 | } |
3285 | |
3286 | /* |
3287 | * vcpu_deinit_vmx |
3288 | * |
3289 | * Deinitializes the vcpu described by 'vcpu' |
3290 | * |
3291 | * Parameters: |
3292 | * vcpu: the vcpu to be deinited |
3293 | */ |
3294 | void |
3295 | vcpu_deinit_vmx(struct vcpu *vcpu) |
3296 | { |
3297 | if (vcpu->vc_control_va) { |
3298 | km_free((void *)vcpu->vc_control_va, PAGE_SIZE(1 << 12), |
3299 | &kv_page, &kp_zero); |
3300 | vcpu->vc_control_va = 0; |
3301 | } |
3302 | if (vcpu->vc_vmx_msr_exit_save_va) { |
3303 | km_free((void *)vcpu->vc_vmx_msr_exit_save_va, |
3304 | PAGE_SIZE(1 << 12), &kv_page, &kp_zero); |
3305 | vcpu->vc_vmx_msr_exit_save_va = 0; |
3306 | } |
3307 | if (vcpu->vc_vmx_msr_exit_load_va) { |
3308 | km_free((void *)vcpu->vc_vmx_msr_exit_load_va, |
3309 | PAGE_SIZE(1 << 12), &kv_page, &kp_zero); |
3310 | vcpu->vc_vmx_msr_exit_load_va = 0; |
3311 | } |
3312 | if (vcpu->vc_vmx_msr_entry_load_va) { |
3313 | km_free((void *)vcpu->vc_vmx_msr_entry_load_va, |
3314 | PAGE_SIZE(1 << 12), &kv_page, &kp_zero); |
3315 | vcpu->vc_vmx_msr_entry_load_va = 0; |
3316 | } |
3317 | |
3318 | if (vcpu->vc_vmx_vpid_enabled) |
3319 | vmm_free_vpid(vcpu->vc_vpid); |
3320 | } |
3321 | |
3322 | /* |
3323 | * vcpu_deinit_svm |
3324 | * |
3325 | * Deinitializes the vcpu described by 'vcpu' |
3326 | * |
3327 | * Parameters: |
3328 | * vcpu: the vcpu to be deinited |
3329 | */ |
3330 | void |
3331 | vcpu_deinit_svm(struct vcpu *vcpu) |
3332 | { |
3333 | if (vcpu->vc_control_va) { |
3334 | km_free((void *)vcpu->vc_control_va, PAGE_SIZE(1 << 12), &kv_page, |
3335 | &kp_zero); |
3336 | vcpu->vc_control_va = 0; |
3337 | } |
3338 | if (vcpu->vc_msr_bitmap_va) { |
3339 | km_free((void *)vcpu->vc_msr_bitmap_va, 2 * PAGE_SIZE(1 << 12), &kv_any, |
3340 | &vmm_kp_contig); |
3341 | vcpu->vc_msr_bitmap_va = 0; |
3342 | } |
3343 | if (vcpu->vc_svm_hsa_va) { |
3344 | km_free((void *)vcpu->vc_svm_hsa_va, PAGE_SIZE(1 << 12), &kv_page, |
3345 | &kp_zero); |
3346 | vcpu->vc_svm_hsa_va = 0; |
3347 | } |
3348 | if (vcpu->vc_svm_ioio_va) { |
3349 | km_free((void *)vcpu->vc_svm_ioio_va, 3 * PAGE_SIZE(1 << 12), &kv_any, |
3350 | &vmm_kp_contig); |
3351 | vcpu->vc_svm_ioio_va = 0; |
3352 | } |
3353 | |
3354 | vmm_free_vpid(vcpu->vc_vpid); |
3355 | } |
3356 | |
3357 | /* |
3358 | * vcpu_deinit |
3359 | * |
3360 | * Calls the architecture-specific VCPU deinit routine |
3361 | * |
3362 | * Parameters: |
3363 | * vcpu: the vcpu to be deinited |
3364 | */ |
3365 | void |
3366 | vcpu_deinit(struct vcpu *vcpu) |
3367 | { |
3368 | if (vmm_softc->mode == VMM_MODE_EPT) |
3369 | vcpu_deinit_vmx(vcpu); |
3370 | else if (vmm_softc->mode == VMM_MODE_RVI) |
3371 | vcpu_deinit_svm(vcpu); |
3372 | else |
3373 | panic("%s: unknown vmm mode: %d", __func__, vmm_softc->mode); |
3374 | } |
3375 | |
3376 | /* |
3377 | * vcpu_vmx_check_cap |
3378 | * |
3379 | * Checks if the 'cap' bit in the 'msr' MSR can be set or cleared (set = 1 |
3380 | * or set = 0, respectively). |
3381 | * |
3382 | * When considering 'msr', we check to see if true controls are available, |
3383 | * and use those if so. |
3384 | * |
3385 | * Returns 1 if 'cap' can be set/cleared as requested, 0 otherwise. |
3386 | */ |
3387 | int |
3388 | vcpu_vmx_check_cap(struct vcpu *vcpu, uint32_t msr, uint32_t cap, int set) |
3389 | { |
3390 | uint64_t ctl; |
3391 | |
3392 | if (vcpu->vc_vmx_basic & IA32_VMX_TRUE_CTLS_AVAIL(1ULL << 55)) { |
3393 | switch (msr) { |
3394 | case IA32_VMX_PINBASED_CTLS0x481: |
3395 | ctl = vcpu->vc_vmx_true_pinbased_ctls; |
3396 | break; |
3397 | case IA32_VMX_PROCBASED_CTLS0x482: |
3398 | ctl = vcpu->vc_vmx_true_procbased_ctls; |
3399 | break; |
3400 | case IA32_VMX_PROCBASED2_CTLS0x48B: |
3401 | ctl = vcpu->vc_vmx_procbased2_ctls; |
3402 | break; |
3403 | case IA32_VMX_ENTRY_CTLS0x484: |
3404 | ctl = vcpu->vc_vmx_true_entry_ctls; |
3405 | break; |
3406 | case IA32_VMX_EXIT_CTLS0x483: |
3407 | ctl = vcpu->vc_vmx_true_exit_ctls; |
3408 | break; |
3409 | default: |
3410 | return (0); |
3411 | } |
3412 | } else { |
3413 | switch (msr) { |
3414 | case IA32_VMX_PINBASED_CTLS0x481: |
3415 | ctl = vcpu->vc_vmx_pinbased_ctls; |
3416 | break; |
3417 | case IA32_VMX_PROCBASED_CTLS0x482: |
3418 | ctl = vcpu->vc_vmx_procbased_ctls; |
3419 | break; |
3420 | case IA32_VMX_PROCBASED2_CTLS0x48B: |
3421 | ctl = vcpu->vc_vmx_procbased2_ctls; |
3422 | break; |
3423 | case IA32_VMX_ENTRY_CTLS0x484: |
3424 | ctl = vcpu->vc_vmx_entry_ctls; |
3425 | break; |
3426 | case IA32_VMX_EXIT_CTLS0x483: |
3427 | ctl = vcpu->vc_vmx_exit_ctls; |
3428 | break; |
3429 | default: |
3430 | return (0); |
3431 | } |
3432 | } |
3433 | |
3434 | if (set) { |
3435 | /* Check bit 'cap << 32', must be !0 */ |
3436 | return (ctl & ((uint64_t)cap << 32)) != 0; |
3437 | } else { |
3438 | /* Check bit 'cap', must be 0 */ |
3439 | return (ctl & cap) == 0; |
3440 | } |
3441 | } |
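vcpu_vmx_check_cap() relies on the layout of the VMX capability MSRs: the low 32 bits are the allowed-0 settings (a 1 there means the control is fixed to 1) and the high 32 bits are the allowed-1 settings (a 0 there means the control is fixed to 0). The same test, pulled out of the vcpu structure into a standalone sketch (illustrative only; the names and the sample MSR value are invented):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch: can control bit 'cap' in capability MSR value
 * 'ctl' be set (set=1) or cleared (set=0)?  Mirrors the checks at the
 * end of vcpu_vmx_check_cap().
 */
static int
vmx_cap_allows(uint64_t ctl, uint32_t cap, int set)
{
        if (set)
                return ((ctl & ((uint64_t)cap << 32)) != 0);    /* allowed-1 */
        return ((ctl & cap) == 0);                              /* allowed-0 */
}

int
main(void)
{
        /* hypothetical capability MSR: bit 3 fixed to 1, bit 7 fixed to 0 */
        uint64_t cap = ((uint64_t)0xffffff7f << 32) | 0x00000008;

        printf("clear bit 3: %d\n", vmx_cap_allows(cap, 1U << 3, 0));   /* 0 */
        printf("set bit 7:   %d\n", vmx_cap_allows(cap, 1U << 7, 1));   /* 0 */
        printf("set bit 2:   %d\n", vmx_cap_allows(cap, 1U << 2, 1));   /* 1 */
        return (0);
}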
3442 | |
3443 | /* |
3444 | * vcpu_vmx_compute_ctrl |
3445 | * |
3446 | * Computes the appropriate control value, given the supplied parameters |
3447 | * and CPU capabilities. |
3448 | * |
3449 | * Intel has made somewhat of a mess of this computation - it is described |
3450 | * using no fewer than three different approaches, spread across many |
3451 | * pages of the SDM. Further compounding the problem is the fact that now |
3452 | * we have "true controls" for each type of "control", and each needs to |
3453 | * be examined to get the calculation right, but only if "true" controls |
3454 | * are present on the CPU we're on. |
3455 | * |
3456 | * Parameters: |
3457 | * ctrlval: the control value, as read from the CPU MSR |
3458 | * ctrl: which control is being set (eg, pinbased, procbased, etc) |
3459 | * want0: the set of desired 0 bits |
3460 | * want1: the set of desired 1 bits |
3461 | * out: (out) the correct value to write into the VMCS for this VCPU, |
3462 | * for the 'ctrl' desired. |
3463 | * |
3464 | * Returns 0 if successful, or EINVAL if the supplied parameters define |
3465 | * an unworkable control setup. |
3466 | */ |
3467 | int |
3468 | vcpu_vmx_compute_ctrl(uint64_t ctrlval, uint16_t ctrl, uint32_t want1, |
3469 | uint32_t want0, uint32_t *out) |
3470 | { |
3471 | int i, set, clear; |
3472 | |
3473 | *out = 0; |
3474 | |
3475 | /* |
3476 | * The Intel SDM gives three formulae for determining which bits to |
3477 | * set/clear for a given control and desired functionality. Formula |
3478 | * 1 is the simplest but disallows use of newer features that are |
3479 | * enabled by functionality in later CPUs. |
3480 | * |
3481 | * Formulas 2 and 3 allow such extra functionality. We use formula |
3482 | * 2 - this requires us to know the identity of controls in the |
3483 | * "default1" class for each control register, but allows us to not |
3484 | * have to pass along and/or query both sets of capability MSRs for |
3485 | * each control lookup. This makes the code slightly longer, |
3486 | * however. |
3487 | */ |
3488 | for (i = 0; i < 32; i++) { |
3489 | /* Figure out if we can set and / or clear this bit */ |
3490 | set = (ctrlval & (1ULL << (i + 32))) != 0; |
3491 | clear = ((1ULL << i) & ((uint64_t)ctrlval)) == 0; |
3492 | |
3493 | /* If the bit can be neither set nor cleared, something's wrong */ |
3494 | if (!set && !clear) |
3495 | return (EINVAL22); |
3496 | |
3497 | /* |
3498 | * Formula 2.c.i - "If the relevant VMX capability MSR |
3499 | * reports that a control has a single setting, use that |
3500 | * setting." |
3501 | */ |
3502 | if (set && !clear) { |
3503 | if (want0 & (1ULL << i)) |
3504 | return (EINVAL22); |
3505 | else |
3506 | *out |= (1ULL << i); |
3507 | } else if (clear && !set) { |
3508 | if (want1 & (1ULL << i)) |
3509 | return (EINVAL22); |
3510 | else |
3511 | *out &= ~(1ULL << i); |
3512 | } else { |
3513 | /* |
3514 | * 2.c.ii - "If the relevant VMX capability MSR |
3515 | * reports that a control can be set to 0 or 1 |
3516 | * and that control's meaning is known to the VMM, |
3517 | * set the control based on the functionality desired." |
3518 | */ |
3519 | if (want1 & (1ULL << i)) |
3520 | *out |= (1ULL << i); |
3521 | else if (want0 & (1 << i)) |
3522 | *out &= ~(1ULL << i); |
3523 | else { |
3524 | /* |
3525 | * ... assuming the control's meaning is not |
3526 | * known to the VMM ... |
3527 | * |
3528 | * 2.c.iii - "If the relevant VMX capability |
3529 | * MSR reports that a control can be set to 0 |
3530 | * or 1 and the control is not in the default1 |
3531 | * class, set the control to 0." |
3532 | * |
3533 | * 2.c.iv - "If the relevant VMX capability |
3534 | * MSR reports that a control can be set to 0 |
3535 | * or 1 and the control is in the default1 |
3536 | * class, set the control to 1." |
3537 | */ |
3538 | switch (ctrl) { |
3539 | case IA32_VMX_PINBASED_CTLS0x481: |
3540 | case IA32_VMX_TRUE_PINBASED_CTLS0x48D: |
3541 | /* |
3542 | * A.3.1 - default1 class of pinbased |
3543 | * controls comprises bits 1,2,4 |
3544 | */ |
3545 | switch (i) { |
3546 | case 1: |
3547 | case 2: |
3548 | case 4: |
3549 | *out |= (1ULL << i); |
3550 | break; |
3551 | default: |
3552 | *out &= ~(1ULL << i); |
3553 | break; |
3554 | } |
3555 | break; |
3556 | case IA32_VMX_PROCBASED_CTLS0x482: |
3557 | case IA32_VMX_TRUE_PROCBASED_CTLS0x48E: |
3558 | /* |
3559 | * A.3.2 - default1 class of procbased |
3560 | * controls comprises bits 1, 4-6, 8, |
3561 | * 13-16, 26 |
3562 | */ |
3563 | switch (i) { |
3564 | case 1: |
3565 | case 4 ... 6: |
3566 | case 8: |
3567 | case 13 ... 16: |
3568 | case 26: |
3569 | *out |= (1ULL << i); |
3570 | break; |
3571 | default: |
3572 | *out &= ~(1ULL << i); |
3573 | break; |
3574 | } |
3575 | break; |
3576 | /* |
3577 | * Unknown secondary procbased controls |
3578 | * can always be set to 0 |
3579 | */ |
3580 | case IA32_VMX_PROCBASED2_CTLS0x48B: |
3581 | *out &= ~(1ULL << i); |
3582 | break; |
3583 | case IA32_VMX_EXIT_CTLS0x483: |
3584 | case IA32_VMX_TRUE_EXIT_CTLS0x48F: |
3585 | /* |
3586 | * A.4 - default1 class of exit |
3587 | * controls comprises bits 0-8, 10, |
3588 | * 11, 13, 14, 16, 17 |
3589 | */ |
3590 | switch (i) { |
3591 | case 0 ... 8: |
3592 | case 10 ... 11: |
3593 | case 13 ... 14: |
3594 | case 16 ... 17: |
3595 | *out |= (1ULL << i); |
3596 | break; |
3597 | default: |
3598 | *out &= ~(1ULL << i); |
3599 | break; |
3600 | } |
3601 | break; |
3602 | case IA32_VMX_ENTRY_CTLS0x484: |
3603 | case IA32_VMX_TRUE_ENTRY_CTLS0x490: |
3604 | /* |
3605 | * A.5 - default1 class of entry |
3606 | * controls comprises bits 0-8, 12 |
3607 | */ |
3608 | switch (i) { |
3609 | case 0 ... 8: |
3610 | case 12: |
3611 | *out |= (1ULL << i); |
3612 | break; |
3613 | default: |
3614 | *out &= ~(1ULL << i); |
3615 | break; |
3616 | } |
3617 | break; |
3618 | } |
3619 | } |
3620 | } |
3621 | } |
3622 | |
3623 | return (0); |
3624 | } |
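For contrast with the formula 2 computation above, the SDM's simpler formula 1 ignores the default1 classes entirely: set every bit the capability MSR fixes to 1, clear every bit it fixes to 0, and take the caller's wishes only for the rest. A hedged sketch of that approach (illustrative only, not what vmm(4) uses; note it silently drops requested bits the hardware cannot provide instead of returning EINVAL):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch of SDM "formula 1": the low dword of the
 * capability MSR is the set of bits that must be 1, the high dword is
 * the set of bits that may be 1.
 */
static uint32_t
vmx_compute_ctrl_simple(uint64_t capmsr, uint32_t want1)
{
        uint32_t must_be_1 = (uint32_t)capmsr;
        uint32_t may_be_1 = (uint32_t)(capmsr >> 32);

        return ((want1 | must_be_1) & may_be_1);
}

int
main(void)
{
        /* hypothetical MSR: bits 1,2,4 fixed to 1, only bits 0-6 settable */
        uint64_t capmsr = ((uint64_t)0x7f << 32) | 0x16;

        /* bit 7 is requested but not settable, so it is silently dropped */
        printf("ctls = 0x%x\n", vmx_compute_ctrl_simple(capmsr, 0x88));
        return (0);
}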
3625 | |
3626 | /* |
3627 | * vm_run |
3628 | * |
3629 | * Run the vm / vcpu specified by 'vrp' |
3630 | * |
3631 | * Parameters: |
3632 | * vrp: structure defining the VM to run |
3633 | * |
3634 | * Return value: |
3635 | * ENOENT: the VM defined in 'vrp' could not be located |
3636 | * EBUSY: the VM defined in 'vrp' is already running |
3637 | * EFAULT: error copying data from userspace (vmd) on return from previous |
3638 | * exit. |
3639 | * EAGAIN: help is needed from vmd(8) (device I/O or an exit vmm(4) |
3640 | * cannot handle in-kernel). |
3641 | * 0: the run loop exited and no help is needed from vmd(8) |
3642 | */ |
3643 | int |
3644 | vm_run(struct vm_run_params *vrp) |
3645 | { |
3646 | struct vm *vm; |
3647 | struct vcpu *vcpu; |
3648 | int ret = 0; |
3649 | u_int old, next; |
3650 | |
3651 | /* |
3652 | * Find desired VM |
3653 | */ |
3654 | ret = vm_find(vrp->vrp_vm_id, &vm); |
3655 | if (ret) |
3656 | return (ret); |
3657 | |
3658 | vcpu = vm_find_vcpu(vm, vrp->vrp_vcpu_id); |
3659 | if (vcpu == NULL((void *)0)) { |
3660 | ret = ENOENT2; |
3661 | goto out; |
3662 | } |
3663 | |
3664 | /* |
3665 | * Attempt to transition from VCPU_STATE_STOPPED -> VCPU_STATE_RUNNING. |
3666 | * Failure to make the transition indicates the VCPU is busy. |
3667 | */ |
3668 | rw_enter_write(&vcpu->vc_lock); |
3669 | old = VCPU_STATE_STOPPED; |
3670 | next = VCPU_STATE_RUNNING; |
3671 | if (atomic_cas_uint(&vcpu->vc_state, old, next)_atomic_cas_uint((&vcpu->vc_state), (old), (next)) != old) { |
3672 | ret = EBUSY16; |
3673 | goto out_unlock; |
3674 | } |
3675 | |
3676 | /* |
3677 | * We may be returning from userland helping us from the last exit. |
3678 | * If so (vrp_continue == 1), copy in the exit data from vmd. The |
3679 | * exit data will be consumed before the next entry (this typically |
3680 | * comprises VCPU register changes as the result of vmd(8)'s actions). |
3681 | */ |
3682 | if (vrp->vrp_continue) { |
3683 | if (copyin(vrp->vrp_exit, &vcpu->vc_exit, |
3684 | sizeof(struct vm_exit)) == EFAULT14) { |
3685 | ret = EFAULT14; |
3686 | goto out_unlock; |
3687 | } |
3688 | } |
3689 | |
3690 | WRITE_ONCE(vcpu->vc_curcpu, curcpu())({ typeof(vcpu->vc_curcpu) __tmp = (({struct cpu_info *__ci ; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof (struct cpu_info, ci_self))); __ci;})); *(volatile typeof(vcpu ->vc_curcpu) *)&(vcpu->vc_curcpu) = __tmp; __tmp; } ); |
3691 | /* Run the VCPU specified in vrp */ |
3692 | if (vcpu->vc_virt_mode == VMM_MODE_EPT) { |
3693 | ret = vcpu_run_vmx(vcpu, vrp); |
3694 | } else if (vcpu->vc_virt_mode == VMM_MODE_RVI) { |
3695 | ret = vcpu_run_svm(vcpu, vrp); |
3696 | } |
3697 | WRITE_ONCE(vcpu->vc_curcpu, NULL)({ typeof(vcpu->vc_curcpu) __tmp = (((void *)0)); *(volatile typeof(vcpu->vc_curcpu) *)&(vcpu->vc_curcpu) = __tmp ; __tmp; }); |
3698 | |
3699 | if (ret == 0 || ret == EAGAIN35) { |
3700 | /* If we are exiting, populate exit data so vmd can help. */ |
3701 | vrp->vrp_exit_reason = (ret == 0) ? VM_EXIT_NONE0xFFFF |
3702 | : vcpu->vc_gueststate.vg_exit_reason; |
3703 | vrp->vrp_irqready = vcpu->vc_irqready; |
3704 | vcpu->vc_state = VCPU_STATE_STOPPED; |
3705 | |
3706 | if (copyout(&vcpu->vc_exit, vrp->vrp_exit, |
3707 | sizeof(struct vm_exit)) == EFAULT14) { |
3708 | ret = EFAULT14; |
3709 | } else |
3710 | ret = 0; |
3711 | } else { |
3712 | vrp->vrp_exit_reason = VM_EXIT_TERMINATED0xFFFE; |
3713 | vcpu->vc_state = VCPU_STATE_TERMINATED; |
3714 | } |
3715 | out_unlock: |
3716 | rw_exit_write(&vcpu->vc_lock); |
3717 | out: |
3718 | refcnt_rele_wake(&vm->vm_refcnt); |
3719 | return (ret); |
3720 | } |
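The STOPPED to RUNNING transition at the top of vm_run() is a compare-and-swap, so two ioctls racing on the same VCPU cannot both enter the run loop. A minimal userland sketch of the same guard using C11 atomics (illustrative only; the kernel uses atomic_cas_uint() and the real states are VCPU_STATE_STOPPED/VCPU_STATE_RUNNING):

#include <stdatomic.h>
#include <stdio.h>

enum { STOPPED, RUNNING };

/* Illustrative sketch: claim the vcpu if and only if it is stopped. */
static int
claim_vcpu(atomic_uint *state)
{
        unsigned int expected = STOPPED;

        /* fails (and updates 'expected') if another thread won the race */
        return (atomic_compare_exchange_strong(state, &expected, RUNNING));
}

int
main(void)
{
        atomic_uint state = STOPPED;

        printf("first claim:  %d\n", claim_vcpu(&state));       /* 1 */
        printf("second claim: %d\n", claim_vcpu(&state));       /* 0 */
        return (0);
}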
3721 | |
3722 | /* |
3723 | * vmm_fpurestore |
3724 | * |
3725 | * Restore the guest's FPU state, saving the existing userland thread's |
3726 | * FPU context if necessary. Must be called with interrupts disabled. |
3727 | */ |
3728 | int |
3729 | vmm_fpurestore(struct vcpu *vcpu) |
3730 | { |
3731 | struct cpu_info *ci = curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;}); |
3732 | |
3733 | rw_assert_wrlock(&vcpu->vc_lock); |
3734 | |
3735 | /* save vmm's FPU state if we haven't already */ |
3736 | if (ci->ci_pflags & CPUPF_USERXSTATE0x02) { |
3737 | ci->ci_pflags &= ~CPUPF_USERXSTATE0x02; |
3738 | fpusavereset(&curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc->p_addr->u_pcb.pcb_savefpu); |
3739 | } |
3740 | |
3741 | if (vcpu->vc_fpuinited) |
3742 | xrstor_kern(&vcpu->vc_g_fpu, xsave_mask); |
3743 | |
3744 | if (xsave_mask) { |
3745 | /* Restore guest %xcr0 */ |
3746 | if (xsetbv_user(0, vcpu->vc_gueststate.vg_xcr0)) { |
3747 | DPRINTF("%s: guest attempted to set invalid bits in " |
3748 | "xcr0 (guest %%xcr0=0x%llx, host %%xcr0=0x%llx)\n", |
3749 | __func__, vcpu->vc_gueststate.vg_xcr0, xsave_mask); |
3750 | return EINVAL22; |
3751 | } |
3752 | } |
3753 | |
3754 | return 0; |
3755 | } |
3756 | |
3757 | /* |
3758 | * vmm_fpusave |
3759 | * |
3760 | * Save the guest's FPU state. Must be called with interrupts disabled. |
3761 | */ |
3762 | void |
3763 | vmm_fpusave(struct vcpu *vcpu) |
3764 | { |
3765 | rw_assert_wrlock(&vcpu->vc_lock); |
3766 | |
3767 | if (xsave_mask) { |
3768 | /* Save guest %xcr0 */ |
3769 | vcpu->vc_gueststate.vg_xcr0 = xgetbv(0); |
3770 | |
3771 | /* Restore host %xcr0 */ |
3772 | xsetbv(0, xsave_mask & XFEATURE_XCR0_MASK(0x00000001 | 0x00000002 | 0x00000004 | (0x00000008 | 0x00000010 ) | (0x00000020 | 0x00000040 | 0x00000080) | 0x00000200 | (0x00040000 | 0x00040000))); |
3773 | } |
3774 | |
3775 | /* |
3776 | * Save full copy of FPU state - guest content is always |
3777 | * a subset of host's save area (see xsetbv exit handler) |
3778 | */ |
3779 | fpusavereset(&vcpu->vc_g_fpu); |
3780 | vcpu->vc_fpuinited = 1; |
3781 | } |
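vmm_fpurestore() and vmm_fpusave() keep the guest's %xcr0 confined to features the host enabled in xsave_mask; xsetbv_user() rejects anything else. The necessary conditions can be sketched in isolation (illustrative only; the function name is invented, and this is not the full architectural validity check, which also constrains combinations such as AVX without SSE):

#include <stdint.h>
#include <stdio.h>

#define XFEATURE_X87    0x1ULL  /* bit 0 must always be set in %xcr0 */

/*
 * Illustrative sketch: a guest %xcr0 is acceptable only if it enables
 * x87 state and enables nothing the host's xsave mask lacks.
 */
static int
xcr0_subset_of_host(uint64_t guest_xcr0, uint64_t host_xsave_mask)
{
        if ((guest_xcr0 & XFEATURE_X87) == 0)
                return (0);
        if (guest_xcr0 & ~host_xsave_mask)
                return (0);
        return (1);
}

int
main(void)
{
        printf("%d\n", xcr0_subset_of_host(0x7, 0x7));   /* 1 */
        printf("%d\n", xcr0_subset_of_host(0x1f, 0x7));  /* 0 */
        return (0);
}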
3782 | |
3783 | /* |
3784 | * vmm_translate_gva |
3785 | * |
3786 | * Translates a guest virtual address to a guest physical address by walking |
3787 | * the currently active page table (if needed). |
3788 | * |
3789 | * Note - this function can possibly alter the supplied VCPU state. |
3790 | * Specifically, it may inject exceptions depending on the current VCPU |
3791 | * configuration, and may alter %cr2 on #PF. Consequently, this function |
3792 | * should only be used as part of instruction emulation. |
3793 | * |
3794 | * Parameters: |
3795 | * vcpu: The VCPU this translation should be performed for (guest MMU settings |
3796 | * are gathered from this VCPU) |
3797 | * va: virtual address to translate |
3798 | * pa: pointer to paddr_t variable that will receive the translated physical |
3799 | * address. 'pa' is unchanged on error. |
3800 | * mode: one of PROT_READ, PROT_WRITE, PROT_EXEC indicating the mode in which |
3801 | * the address should be translated |
3802 | * |
3803 | * Return values: |
3804 | * 0: the address was successfully translated - 'pa' contains the physical |
3805 | * address currently mapped by 'va'. |
3806 | * EFAULT: the PTE for 'VA' is unmapped. A #PF will be injected in this case |
3807 | * and %cr2 set in the vcpu structure. |
3808 | * EINVAL: an error occurred reading paging table structures |
3809 | */ |
3810 | int |
3811 | vmm_translate_gva(struct vcpu *vcpu, uint64_t va, uint64_t *pa, int mode) |
3812 | { |
3813 | int level, shift, pdidx; |
3814 | uint64_t pte, pt_paddr, pte_paddr, mask, low_mask, high_mask; |
3815 | uint64_t shift_width, pte_size, *hva; |
3816 | paddr_t hpa; |
3817 | struct vcpu_reg_state vrs; |
3818 | |
3819 | level = 0; |
3820 | |
3821 | if (vmm_softc->mode == VMM_MODE_EPT) { |
3822 | if (vcpu_readregs_vmx(vcpu, VM_RWREGS_ALL(0x1 | 0x2 | 0x4 | 0x8 | 0x10), 1, &vrs)) |
3823 | return (EINVAL22); |
3824 | } else if (vmm_softc->mode == VMM_MODE_RVI) { |
3825 | if (vcpu_readregs_svm(vcpu, VM_RWREGS_ALL(0x1 | 0x2 | 0x4 | 0x8 | 0x10), &vrs)) |
3826 | return (EINVAL22); |
3827 | } else { |
3828 | printf("%s: unknown vmm mode\n", __func__); |
3829 | return (EINVAL22); |
3830 | } |
3831 | |
3832 | DPRINTF("%s: guest %%cr0=0x%llx, %%cr3=0x%llx\n", __func__, |
3833 | vrs.vrs_crs[VCPU_REGS_CR0], vrs.vrs_crs[VCPU_REGS_CR3]); |
3834 | |
3835 | if (!(vrs.vrs_crs[VCPU_REGS_CR00] & CR0_PG0x80000000)) { |
3836 | DPRINTF("%s: unpaged, va=pa=0x%llx\n", __func__, |
3837 | va); |
3838 | *pa = va; |
3839 | return (0); |
3840 | } |
3841 | |
3842 | pt_paddr = vrs.vrs_crs[VCPU_REGS_CR32]; |
3843 | |
3844 | if (vrs.vrs_crs[VCPU_REGS_CR00] & CR0_PE0x00000001) { |
3845 | if (vrs.vrs_crs[VCPU_REGS_CR43] & CR4_PAE0x00000020) { |
3846 | pte_size = sizeof(uint64_t); |
3847 | shift_width = 9; |
3848 | |
3849 | if (vrs.vrs_msrs[VCPU_REGS_EFER0] & EFER_LMA0x00000400) { |
3850 | level = 4; |
3851 | mask = L4_MASK0x0000ff8000000000UL; |
3852 | shift = L4_SHIFT39; |
3853 | } else { |
3854 | level = 3; |
3855 | mask = L3_MASK0x0000007fc0000000UL; |
3856 | shift = L3_SHIFT30; |
3857 | } |
3858 | } else { |
3859 | level = 2; |
3860 | shift_width = 10; |
3861 | mask = 0xFFC00000; |
3862 | shift = 22; |
3863 | pte_size = sizeof(uint32_t); |
3864 | } |
3865 | } else { |
3866 | return (EINVAL22); |
3867 | } |
3868 | |
3869 | DPRINTF("%s: pte size=%lld level=%d mask=0x%llx, shift=%d, " |
3870 | "shift_width=%lld\n", __func__, pte_size, level, mask, shift, |
3871 | shift_width); |
3872 | |
3873 | /* XXX: Check for R bit in segment selector and set A bit */ |
3874 | |
3875 | for (;level > 0; level--) { |
3876 | pdidx = (va & mask) >> shift; |
3877 | pte_paddr = (pt_paddr) + (pdidx * pte_size); |
3878 | |
3879 | DPRINTF("%s: read pte level %d @ GPA 0x%llx\n", __func__, |
3880 | level, pte_paddr); |
3881 | if (!pmap_extract(vcpu->vc_parent->vm_map->pmap, pte_paddr, |
3882 | &hpa)) { |
3883 | DPRINTF("%s: cannot extract HPA for GPA 0x%llx\n", |
3884 | __func__, pte_paddr); |
3885 | return (EINVAL22); |
3886 | } |
3887 | |
3888 | hpa = hpa | (pte_paddr & 0xFFF); |
3889 | hva = (uint64_t *)PMAP_DIRECT_MAP(hpa)((vaddr_t)(((((511 - 4) * (1ULL << 39))) | 0xffff000000000000 )) + (hpa)); |
3890 | DPRINTF("%s: GPA 0x%llx -> HPA 0x%llx -> HVA 0x%llx\n", |
3891 | __func__, pte_paddr, (uint64_t)hpa, (uint64_t)hva); |
3892 | if (pte_size == 8) |
3893 | pte = *hva; |
3894 | else |
3895 | pte = *(uint32_t *)hva; |
3896 | |
3897 | DPRINTF("%s: PTE @ 0x%llx = 0x%llx\n", __func__, pte_paddr, |
3898 | pte); |
3899 | |
3900 | /* XXX: Set CR2 */ |
3901 | if (!(pte & PG_V0x0000000000000001UL)) |
3902 | return (EFAULT14); |
3903 | |
3904 | /* XXX: Check for SMAP */ |
3905 | if ((mode == PROT_WRITE0x02) && !(pte & PG_RW0x0000000000000002UL)) |
3906 | return (EPERM1); |
3907 | |
3908 | if ((vcpu->vc_exit.cpl > 0) && !(pte & PG_u0x0000000000000004UL)) |
3909 | return (EPERM1); |
3910 | |
3911 | pte = pte | PG_U0x0000000000000020UL; |
3912 | if (mode == PROT_WRITE0x02) |
3913 | pte = pte | PG_M0x0000000000000040UL; |
3914 | *hva = pte; |
3915 | |
3916 | /* XXX: EINVAL if in 32bit and PG_PS is 1 but CR4.PSE is 0 */ |
3917 | if (pte & PG_PS0x0000000000000080UL) |
3918 | break; |
3919 | |
3920 | if (level > 1) { |
3921 | pt_paddr = pte & PG_FRAME0x000ffffffffff000UL; |
3922 | shift -= shift_width; |
3923 | mask = mask >> shift_width; |
3924 | } |
3925 | } |
3926 | |
3927 | low_mask = ((uint64_t)1ULL << shift) - 1; |
3928 | high_mask = (((uint64_t)1ULL << ((pte_size * 8) - 1)) - 1) ^ low_mask; |
3929 | *pa = (pte & high_mask) | (va & low_mask); |
3930 | |
3931 | DPRINTF("%s: final GPA for GVA 0x%llx = 0x%llx\n", __func__, |
3932 | va, *pa); |
3933 | |
3934 | return (0); |
3935 | } |
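The walk above carries a mask and shift that are narrowed by shift_width at each level. For the common 4-level long-mode case the per-level index arithmetic can be written directly; a standalone sketch (illustrative only; 9-bit indexes and a 12-bit page offset, so the 2- and 3-level legacy modes handled above need different widths):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch: page-table index of virtual address 'va' at
 * 'level' (4 = PML4 .. 1 = PT) for 4-level, 4KB-page long mode.
 */
static unsigned int
pt_index(uint64_t va, int level)
{
        int shift = 12 + 9 * (level - 1);

        return ((va >> shift) & 0x1ff);
}

int
main(void)
{
        uint64_t va = 0x00007f1234567000ULL;    /* hypothetical GVA */
        int level;

        for (level = 4; level >= 1; level--)
                printf("level %d index %u\n", level, pt_index(va, level));
        return (0);
}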
3936 | |
3937 | |
3938 | /* |
3939 | * vcpu_run_vmx |
3940 | * |
3941 | * VMX main loop used to run a VCPU. |
3942 | * |
3943 | * Parameters: |
3944 | * vcpu: The VCPU to run |
3945 | * vrp: run parameters |
3946 | * |
3947 | * Return values: |
3948 | * 0: The run loop exited and no help is needed from vmd |
3949 | * EAGAIN: The run loop exited and help from vmd is needed |
3950 | * EINVAL: an error occurred |
3951 | */ |
3952 | int |
3953 | vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *vrp) |
3954 | { |
3955 | int ret = 0, exitinfo; |
3956 | struct region_descriptor gdt; |
3957 | struct cpu_info *ci = NULL((void *)0); |
3958 | uint64_t exit_reason, cr3, insn_error; |
3959 | struct schedstate_percpu *spc; |
3960 | struct vmx_invvpid_descriptor vid; |
3961 | uint64_t eii, procbased, int_st; |
3962 | uint16_t irq, ldt_sel; |
3963 | u_long s; |
3964 | struct region_descriptor idtr; |
3965 | |
3966 | rw_assert_wrlock(&vcpu->vc_lock); |
3967 | |
3968 | if (vcpu_reload_vmcs_vmx(vcpu)) { |
3969 | printf("%s: failed (re)loading vmcs\n", __func__); |
3970 | return (EINVAL22); |
3971 | } |
3972 | |
3973 | /* |
3974 | * If we are returning from userspace (vmd) because we exited |
3975 | * last time, fix up any needed vcpu state first. Which state |
3976 | * needs to be fixed up depends on what vmd populated in the |
3977 | * exit data structure. |
3978 | */ |
3979 | irq = vrp->vrp_irq; |
3980 | |
3981 | if (vrp->vrp_intr_pending) |
3982 | vcpu->vc_intr = 1; |
3983 | else |
3984 | vcpu->vc_intr = 0; |
3985 | |
3986 | if (vrp->vrp_continue) { |
3987 | switch (vcpu->vc_gueststate.vg_exit_reason) { |
3988 | case VMX_EXIT_IO30: |
3989 | if (vcpu->vc_exit.vei.vei_dir == VEI_DIR_IN) |
3990 | vcpu->vc_gueststate.vg_rax = |
3991 | vcpu->vc_exit.vei.vei_data; |
3992 | vcpu->vc_gueststate.vg_rip = |
3993 | vcpu->vc_exit.vrs.vrs_gprs[VCPU_REGS_RIP16]; |
3994 | if (vmwrite(VMCS_GUEST_IA32_RIP0x681E, |
3995 | vcpu->vc_gueststate.vg_rip)) { |
3996 | printf("%s: failed to update rip\n", __func__); |
3997 | return (EINVAL22); |
3998 | } |
3999 | break; |
4000 | case VMX_EXIT_EPT_VIOLATION48: |
4001 | ret = vcpu_writeregs_vmx(vcpu, VM_RWREGS_GPRS0x1, 0, |
4002 | &vcpu->vc_exit.vrs); |
4003 | if (ret) { |
4004 | printf("%s: vm %d vcpu %d failed to update " |
4005 | "registers\n", __func__, |
4006 | vcpu->vc_parent->vm_id, vcpu->vc_id); |
4007 | return (EINVAL22); |
4008 | } |
4009 | break; |
4010 | case VM_EXIT_NONE0xFFFF: |
4011 | case VMX_EXIT_HLT12: |
4012 | case VMX_EXIT_INT_WINDOW7: |
4013 | case VMX_EXIT_EXTINT1: |
4014 | case VMX_EXIT_CPUID10: |
4015 | case VMX_EXIT_XSETBV55: |
4016 | break; |
4017 | #ifdef VMM_DEBUG |
4018 | case VMX_EXIT_TRIPLE_FAULT2: |
4019 | DPRINTF("%s: vm %d vcpu %d triple fault\n", |
4020 | __func__, vcpu->vc_parent->vm_id, |
4021 | vcpu->vc_id); |
4022 | vmx_vcpu_dump_regs(vcpu); |
4023 | dump_vcpu(vcpu); |
4024 | vmx_dump_vmcs(vcpu); |
4025 | break; |
4026 | case VMX_EXIT_ENTRY_FAILED_GUEST_STATE33: |
4027 | DPRINTF("%s: vm %d vcpu %d failed entry " |
4028 | "due to invalid guest state\n", |
4029 | __func__, vcpu->vc_parent->vm_id, |
4030 | vcpu->vc_id); |
4031 | vmx_vcpu_dump_regs(vcpu); |
4032 | dump_vcpu(vcpu); |
4033 | return (EINVAL22); |
4034 | default: |
4035 | DPRINTF("%s: unimplemented exit type %d (%s)\n", |
4036 | __func__, |
4037 | vcpu->vc_gueststate.vg_exit_reason, |
4038 | vmx_exit_reason_decode( |
4039 | vcpu->vc_gueststate.vg_exit_reason)); |
4040 | vmx_vcpu_dump_regs(vcpu); |
4041 | dump_vcpu(vcpu); |
4042 | break; |
4043 | #endif /* VMM_DEBUG */ |
4044 | } |
4045 | memset(&vcpu->vc_exit, 0, sizeof(vcpu->vc_exit))__builtin_memset((&vcpu->vc_exit), (0), (sizeof(vcpu-> vc_exit))); |
4046 | } |
4047 | |
4048 | /* Host CR3 */ |
4049 | cr3 = rcr3(); |
4050 | if (vmwrite(VMCS_HOST_IA32_CR30x6C02, cr3)) { |
4051 | printf("%s: vmwrite(0x%04X, 0x%llx)\n", __func__, |
4052 | VMCS_HOST_IA32_CR30x6C02, cr3); |
4053 | return (EINVAL22); |
4054 | } |
4055 | |
4056 | /* Handle vmd(8) injected interrupts */ |
4057 | /* Is there an interrupt pending injection? */ |
4058 | if (irq != 0xFFFF) { |
4059 | if (vmread(VMCS_GUEST_INTERRUPTIBILITY_ST0x4824, &int_st)) { |
4060 | printf("%s: can't get interruptibility state\n", |
4061 | __func__); |
4062 | return (EINVAL22); |
4063 | } |
4064 | |
4065 | /* Interruptibility state 0x3 covers blocking by STI and MOV SS */ |
4066 | if (!(int_st & 0x3) && vcpu->vc_irqready) { |
4067 | eii = (irq & 0xFF); |
4068 | eii |= (1ULL << 31); /* Valid */ |
4069 | eii |= (0ULL << 8); /* Hardware Interrupt */ |
4070 | if (vmwrite(VMCS_ENTRY_INTERRUPTION_INFO0x4016, eii)) { |
4071 | printf("vcpu_run_vmx: can't vector " |
4072 | "interrupt to guest\n"); |
4073 | return (EINVAL22); |
4074 | } |
4075 | |
4076 | irq = 0xFFFF; |
Value stored to 'irq' is never read | |
4077 | } |
4078 | } else if (!vcpu->vc_intr) { |
4079 | /* |
4080 | * Disable window exiting |
4081 | */ |
4082 | if (vmread(VMCS_PROCBASED_CTLS0x4002, &procbased)) { |
4083 | printf("%s: can't read procbased ctls on exit\n", |
4084 | __func__); |
4085 | return (EINVAL22); |
4086 | } else { |
4087 | procbased &= ~IA32_VMX_INTERRUPT_WINDOW_EXITING(1ULL << 2); |
4088 | if (vmwrite(VMCS_PROCBASED_CTLS0x4002, procbased)) { |
4089 | printf("%s: can't write procbased ctls " |
4090 | "on exit\n", __func__); |
4091 | return (EINVAL22); |
4092 | } |
4093 | } |
4094 | } |
4095 | |
4096 | while (ret == 0) { |
4097 | #ifdef VMM_DEBUG |
4098 | paddr_t pa = 0ULL; |
4099 | vmptrst(&pa); |
4100 | KASSERT(pa == vcpu->vc_control_pa)((pa == vcpu->vc_control_pa) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/arch/amd64/amd64/vmm_machdep.c", 4100, "pa == vcpu->vc_control_pa" )); |
4101 | #endif /* VMM_DEBUG */ |
4102 | |
4103 | vmm_update_pvclock(vcpu); |
4104 | |
4105 | if (ci != curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})) { |
4106 | ci = curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;}); |
4107 | vcpu->vc_last_pcpu = ci; |
4108 | |
4109 | setregion(&gdt, ci->ci_gdt, GDT_SIZE((6 << 3) + (1 << 4)) - 1); |
4110 | if (gdt.rd_base == 0) { |
4111 | printf("%s: setregion\n", __func__); |
4112 | return (EINVAL22); |
4113 | } |
4114 | |
4115 | /* Host GDTR base */ |
4116 | if (vmwrite(VMCS_HOST_IA32_GDTR_BASE0x6C0C, gdt.rd_base)) { |
4117 | printf("%s: vmwrite(0x%04X, 0x%llx)\n", |
4118 | __func__, VMCS_HOST_IA32_GDTR_BASE0x6C0C, |
4119 | gdt.rd_base); |
4120 | return (EINVAL22); |
4121 | } |
4122 | |
4123 | /* Host TR base */ |
4124 | if (vmwrite(VMCS_HOST_IA32_TR_BASE0x6C0A, |
4125 | (uint64_t)ci->ci_tss)) { |
4126 | printf("%s: vmwrite(0x%04X, 0x%llx)\n", |
4127 | __func__, VMCS_HOST_IA32_TR_BASE0x6C0A, |
4128 | (uint64_t)ci->ci_tss); |
4129 | return (EINVAL22); |
4130 | } |
4131 | } |
4132 | |
4133 | /* Inject event if present */ |
4134 | if (vcpu->vc_event != 0) { |
4135 | eii = (vcpu->vc_event & 0xFF); |
4136 | eii |= (1ULL << 31); /* Valid */ |
4137 | |
4138 | /* Set the "Send error code" flag for certain vectors */ |
4139 | switch (vcpu->vc_event & 0xFF) { |
4140 | case VMM_EX_DF8: |
4141 | case VMM_EX_TS10: |
4142 | case VMM_EX_NP11: |
4143 | case VMM_EX_SS12: |
4144 | case VMM_EX_GP13: |
4145 | case VMM_EX_PF14: |
4146 | case VMM_EX_AC17: |
4147 | eii |= (1ULL << 11); |
4148 | } |
4149 | |
4150 | eii |= (3ULL << 8); /* Hardware Exception */ |
4151 | if (vmwrite(VMCS_ENTRY_INTERRUPTION_INFO0x4016, eii)) { |
4152 | printf("%s: can't vector event to guest\n", |
4153 | __func__); |
4154 | ret = EINVAL22; |
4155 | break; |
4156 | } |
4157 | |
4158 | if (vmwrite(VMCS_ENTRY_EXCEPTION_ERROR_CODE0x4018, 0)) { |
4159 | printf("%s: can't write error code to guest\n", |
4160 | __func__); |
4161 | ret = EINVAL22; |
4162 | break; |
4163 | } |
4164 | |
4165 | vcpu->vc_event = 0; |
4166 | } |
4167 | |
4168 | if (vcpu->vc_vmx_vpid_enabled) { |
4169 | /* Invalidate old TLB mappings */ |
4170 | vid.vid_vpid = vcpu->vc_vpid; |
4171 | vid.vid_addr = 0; |
4172 | invvpid(IA32_VMX_INVVPID_SINGLE_CTX_GLB0x3, &vid); |
4173 | } |
4174 | |
4175 | /* Start / resume the VCPU */ |
4176 | |
4177 | /* Disable interrupts and save the current host FPU state. */ |
4178 | s = intr_disable(); |
4179 | if ((ret = vmm_fpurestore(vcpu))) { |
4180 | intr_restore(s); |
4181 | break; |
4182 | } |
4183 | |
4184 | sidt(&idtr); |
4185 | sldt(&ldt_sel); |
4186 | |
4187 | TRACEPOINT(vmm, guest_enter, vcpu, vrp)do { extern struct dt_probe (dt_static_vmm_guest_enter); struct dt_probe *dtp = &(dt_static_vmm_guest_enter); if (__builtin_expect (((dt_tracing) != 0), 0) && __builtin_expect(((dtp-> dtp_recording) != 0), 0)) { struct dt_provider *dtpv = dtp-> dtp_prov; dtpv->dtpv_enter(dtpv, dtp, vcpu, vrp); } } while (0); |
4188 | |
4189 | /* Restore any guest PKRU state. */ |
4190 | if (vmm_softc->sc_md.pkru_enabled) |
4191 | wrpkru(vcpu->vc_pkru); |
4192 | |
4193 | ret = vmx_enter_guest(&vcpu->vc_control_pa, |
4194 | &vcpu->vc_gueststate, |
4195 | (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED1), |
4196 | ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr); |
4197 | |
4198 | /* Restore host PKRU state. */ |
4199 | if (vmm_softc->sc_md.pkru_enabled) { |
4200 | vcpu->vc_pkru = rdpkru(0); |
4201 | wrpkru(PGK_VALUE0xfffffffc); |
4202 | } |
4203 | |
4204 | lidt(&idtr); |
4205 | lldt(ldt_sel); |
4206 | |
4207 | /* |
4208 | * On exit, interrupts are disabled, and we are running with |
4209 | * the guest FPU state still possibly on the CPU. Save the FPU |
4210 | * state before re-enabling interrupts. |
4211 | */ |
4212 | vmm_fpusave(vcpu); |
4213 | intr_restore(s); |
4214 | |
4215 | atomic_swap_uint(&vcpu->vc_vmx_vmcs_state, VMCS_LAUNCHED)_atomic_swap_uint((&vcpu->vc_vmx_vmcs_state), (1)); |
4216 | exit_reason = VM_EXIT_NONE0xFFFF; |
4217 | |
4218 | /* If we exited successfully ... */ |
4219 | if (ret == 0) { |
4220 | exitinfo = vmx_get_exit_info( |
4221 | &vcpu->vc_gueststate.vg_rip, &exit_reason); |
4222 | if (!(exitinfo & VMX_EXIT_INFO_HAVE_RIP0x1)) { |
4223 | printf("%s: cannot read guest rip\n", __func__); |
4224 | ret = EINVAL22; |
4225 | break; |
4226 | } |
4227 | if (!(exitinfo & VMX_EXIT_INFO_HAVE_REASON0x2)) { |
4228 | printf("%s: can't read exit reason\n", __func__); |
4229 | ret = EINVAL22; |
4230 | break; |
4231 | } |
4232 | vcpu->vc_gueststate.vg_exit_reason = exit_reason; |
4233 | TRACEPOINT(vmm, guest_exit, vcpu, vrp, exit_reason)do { extern struct dt_probe (dt_static_vmm_guest_exit); struct dt_probe *dtp = &(dt_static_vmm_guest_exit); if (__builtin_expect (((dt_tracing) != 0), 0) && __builtin_expect(((dtp-> dtp_recording) != 0), 0)) { struct dt_provider *dtpv = dtp-> dtp_prov; dtpv->dtpv_enter(dtpv, dtp, vcpu, vrp, exit_reason ); } } while (0); |
4234 | |
4235 | /* Update our state */ |
4236 | if (vmread(VMCS_GUEST_IA32_RFLAGS0x6820, |
4237 | &vcpu->vc_gueststate.vg_rflags)) { |
4238 | printf("%s: can't read guest rflags during " |
4239 | "exit\n", __func__); |
4240 | ret = EINVAL22; |
4241 | break; |
4242 | } |
4243 | |
4244 | /* |
4245 | * Handle the exit. This will alter "ret" to EAGAIN if |
4246 | * the exit handler determines help from vmd is needed. |
4247 | */ |
4248 | ret = vmx_handle_exit(vcpu); |
4249 | |
4250 | if (vcpu->vc_gueststate.vg_rflags & PSL_I0x00000200) |
4251 | vcpu->vc_irqready = 1; |
4252 | else |
4253 | vcpu->vc_irqready = 0; |
4254 | |
4255 | /* |
4256 | * If not ready for interrupts, but interrupts pending, |
4257 | * enable interrupt window exiting. |
4258 | */ |
4259 | if (vcpu->vc_irqready == 0 && vcpu->vc_intr) { |
4260 | if (vmread(VMCS_PROCBASED_CTLS0x4002, &procbased)) { |
4261 | printf("%s: can't read procbased ctls " |
4262 | "on intwin exit\n", __func__); |
4263 | ret = EINVAL22; |
4264 | break; |
4265 | } |
4266 | |
4267 | procbased |= IA32_VMX_INTERRUPT_WINDOW_EXITING(1ULL << 2); |
4268 | if (vmwrite(VMCS_PROCBASED_CTLS0x4002, procbased)) { |
4269 | printf("%s: can't write procbased ctls " |
4270 | "on intwin exit\n", __func__); |
4271 | ret = EINVAL22; |
4272 | break; |
4273 | } |
4274 | } |
4275 | |
4276 | /* |
4277 | * Exit to vmd if we are terminating, failed to enter, |
4278 | * or need help (device I/O) |
4279 | */ |
4280 | if (ret || vcpu_must_stop(vcpu)) |
4281 | break; |
4282 | |
4283 | if (vcpu->vc_intr && vcpu->vc_irqready) { |
4284 | ret = EAGAIN35; |
4285 | break; |
4286 | } |
4287 | |
4288 | /* Check if we should yield - don't hog the {p,v}pu */ |
4289 | spc = &ci->ci_schedstate; |
4290 | if (spc->spc_schedflags & SPCF_SHOULDYIELD0x0002) |
4291 | break; |
4292 | |
4293 | } else { |
4294 | /* |
4295 | * We failed vmresume or vmlaunch for some reason, |
4296 | * typically due to invalid vmcs state or other |
4297 | * reasons documented in SDM Vol 3C 30.4. |
4298 | */ |
4299 | switch (ret) { |
4300 | case VMX_FAIL_LAUNCH_INVALID_VMCS2: |
4301 | printf("%s: failed %s with invalid vmcs\n", |
4302 | __func__, |
4303 | (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED1 |
4304 | ? "vmresume" : "vmlaunch")); |
4305 | break; |
4306 | case VMX_FAIL_LAUNCH_VALID_VMCS3: |
4307 | printf("%s: failed %s with valid vmcs\n", |
4308 | __func__, |
4309 | (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED1 |
4310 | ? "vmresume" : "vmlaunch")); |
4311 | break; |
4312 | default: |
4313 | printf("%s: failed %s for unknown reason\n", |
4314 | __func__, |
4315 | (vcpu->vc_vmx_vmcs_state == VMCS_LAUNCHED1 |
4316 | ? "vmresume" : "vmlaunch")); |
4317 | } |
4318 | |
4319 | ret = EINVAL22; |
4320 | |
4321 | /* Try to translate a vmfail error code, if possible. */ |
4322 | if (vmread(VMCS_INSTRUCTION_ERROR0x4400, &insn_error)) { |
4323 | printf("%s: can't read insn error field\n", |
4324 | __func__); |
4325 | } else |
4326 | printf("%s: error code = %lld, %s\n", __func__, |
4327 | insn_error, |
4328 | vmx_instruction_error_decode(insn_error)); |
4329 | #ifdef VMM_DEBUG |
4330 | vmx_vcpu_dump_regs(vcpu); |
4331 | dump_vcpu(vcpu); |
4332 | #endif /* VMM_DEBUG */ |
4333 | } |
4334 | } |
4335 | |
4336 | vcpu->vc_last_pcpu = curcpu()({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;}); |
4337 | |
4338 | /* Copy the VCPU register state to the exit structure */ |
4339 | if (vcpu_readregs_vmx(vcpu, VM_RWREGS_ALL(0x1 | 0x2 | 0x4 | 0x8 | 0x10), 0, &vcpu->vc_exit.vrs)) |
4340 | ret = EINVAL22; |
4341 | vcpu->vc_exit.cpl = vmm_get_guest_cpu_cpl(vcpu); |
4342 | |
4343 | return (ret); |
4344 | } |
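Both the interrupt injection before the run loop and the event injection inside it build the VM-entry interruption-information field the same way: vector in bits 7:0, event type in bits 10:8 (0 external interrupt, 3 hardware exception), "deliver error code" in bit 11, valid in bit 31. A hedged sketch of that encoding (illustrative only; the function name is invented and the layout is as given in the Intel SDM):

#include <stdint.h>
#include <stdio.h>

#define INTR_TYPE_EXTINT        0ULL    /* external interrupt */
#define INTR_TYPE_HW_EXCEPTION  3ULL    /* hardware exception */

/* Illustrative sketch: compose a VM-entry interruption-information field. */
static uint64_t
entry_intr_info(uint8_t vector, uint64_t type, int deliver_errcode)
{
        uint64_t eii = vector;

        eii |= type << 8;
        if (deliver_errcode)
                eii |= 1ULL << 11;
        eii |= 1ULL << 31;              /* valid */

        return (eii);
}

int
main(void)
{
        printf("irq 0x20: 0x%llx\n", (unsigned long long)
            entry_intr_info(0x20, INTR_TYPE_EXTINT, 0));
        printf("#GP:      0x%llx\n", (unsigned long long)
            entry_intr_info(13, INTR_TYPE_HW_EXCEPTION, 1));
        return (0);
}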
4345 | |
4346 | /* |
4347 | * vmx_handle_intr |
4348 | * |
4349 | * Handle host (external) interrupts. We read which interrupt fired by |
4350 | * extracting the vector from the VMCS and dispatch the interrupt directly |
4351 | * to the host using vmm_dispatch_intr. |
4352 | */ |
4353 | void |
4354 | vmx_handle_intr(struct vcpu *vcpu) |
4355 | { |
4356 | uint8_t vec; |
4357 | uint64_t eii; |
4358 | struct gate_descriptor *idte; |
4359 | vaddr_t handler; |
4360 | |
4361 | if (vmread(VMCS_EXIT_INTERRUPTION_INFO0x4404, &eii)) { |
4362 | printf("%s: can't obtain intr info\n", __func__); |
4363 | return; |
4364 | } |
4365 | |
4366 | vec = eii & 0xFF; |
4367 | |
4368 | /* XXX check "error valid" code in eii, abort if 0 */ |
4369 | idte=&idt[vec]; |
4370 | handler = idte->gd_looffset + ((uint64_t)idte->gd_hioffset << 16); |
4371 | vmm_dispatch_intr(handler); |
4372 | } |
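vmx_handle_intr() reassembles the handler address from the split offset fields of the amd64 gate descriptor before dispatching. A minimal sketch of that reassembly with a stand-in struct (illustrative only; the real struct gate_descriptor carries selector, IST and type fields as well):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the offset fields of a 64-bit IDT gate. */
struct fake_gate {
        uint16_t gd_looffset;   /* handler offset bits 15:0 */
        uint64_t gd_hioffset;   /* handler offset bits 63:16 */
};

static uint64_t
gate_handler(const struct fake_gate *g)
{
        return (g->gd_looffset + (g->gd_hioffset << 16));
}

int
main(void)
{
        struct fake_gate g = { 0xbeef, 0xffff81234567ULL };     /* hypothetical */

        printf("handler = 0x%llx\n", (unsigned long long)gate_handler(&g));
        return (0);
}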
4373 | |
4374 | /* |
4375 | * svm_handle_hlt |
4376 | * |
4377 | * Handle HLT exits |
4378 | * |
4379 | * Parameters |
4380 | * vcpu: The VCPU that executed the HLT instruction |
4381 | * |
4382 | * Return Values: |
4383 | * EIO: The guest halted with interrupts disabled |
4384 | * EAGAIN: Normal return to vmd - vmd should halt scheduling this VCPU |
4385 | * until a virtual interrupt is ready to inject |
4386 | */ |
4387 | int |
4388 | svm_handle_hlt(struct vcpu *vcpu) |
4389 | { |
4390 | struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va; |
4391 | uint64_t rflags = vmcb->v_rflags; |
4392 | |
4393 | /* All HLT insns are 1 byte */ |
4394 | vcpu->vc_gueststate.vg_rip += 1; |
4395 | |
4396 | if (!(rflags & PSL_I0x00000200)) { |
4397 | DPRINTF("%s: guest halted with interrupts disabled\n", |
4398 | __func__); |
4399 | return (EIO5); |
4400 | } |
4401 | |
4402 | return (EAGAIN35); |
4403 | } |
4404 | |
4405 | /* |
4406 | * vmx_handle_hlt |
4407 | * |
4408 | * Handle HLT exits. HLTing the CPU with interrupts disabled will terminate |
4409 | * the guest (no NMIs handled) by returning EIO to vmd. |
4410 | * |
4411 | * Parameters: |
4412 | * vcpu: The VCPU that executed the HLT instruction |
4413 | * |
4414 | * Return Values: |
4415 | * EINVAL: An error occurred extracting information from the VMCS, or an |
4416 | * invalid HLT instruction was encountered |
4417 | * EIO: The guest halted with interrupts disabled |
4418 | * EAGAIN: Normal return to vmd - vmd should halt scheduling this VCPU |
4419 | * until a virtual interrupt is ready to inject |
4420 | * |
4421 | */ |
4422 | int |
4423 | vmx_handle_hlt(struct vcpu *vcpu) |
4424 | { |
4425 | uint64_t insn_length, rflags; |
4426 | |
4427 | if (vmread(VMCS_INSTRUCTION_LENGTH0x440C, &insn_length)) { |
4428 | printf("%s: can't obtain instruction length\n", __func__); |
4429 | return (EINVAL22); |
4430 | } |
4431 | |
4432 | if (vmread(VMCS_GUEST_IA32_RFLAGS0x6820, &rflags)) { |
4433 | printf("%s: can't obtain guest rflags\n", __func__); |
4434 | return (EINVAL22); |
4435 | } |
4436 | |
4437 | if (insn_length != 1) { |
4438 | DPRINTF("%s: HLT with instruction length %lld not supported\n", |
4439 | __func__, insn_length); |
4440 | return (EINVAL22); |
4441 | } |
4442 | |
4443 | if (!(rflags & PSL_I0x00000200)) { |
4444 | DPRINTF("%s: guest halted with interrupts disabled\n", |
4445 | __func__); |
4446 | return (EIO5); |
4447 | } |
4448 | |
4449 | vcpu->vc_gueststate.vg_rip += insn_length; |
4450 | return (EAGAIN35); |
4451 | } |
4452 | |
4453 | /* |
4454 | * vmx_get_exit_info |
4455 | * |
4456 | * Returns exit information containing the current guest RIP and exit reason |
4457 | * in rip and exit_reason. The return value is a bitmask indicating whether |
4458 | * reading the RIP and exit reason was successful. |
4459 | */ |
4460 | int |
4461 | vmx_get_exit_info(uint64_t *rip, uint64_t *exit_reason) |
4462 | { |
4463 | int rv = 0; |
4464 | |
4465 | if (vmread(VMCS_GUEST_IA32_RIP0x681E, rip) == 0) { |
4466 | rv |= VMX_EXIT_INFO_HAVE_RIP0x1; |
4467 | if (vmread(VMCS_EXIT_REASON0x4402, exit_reason) == 0) |
4468 | rv |= VMX_EXIT_INFO_HAVE_REASON0x2; |
4469 | } |
4470 | return (rv); |
4471 | } |
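Callers check the bitmask returned by vmx_get_exit_info() before trusting either output, as vcpu_run_vmx() does above. A tiny usage sketch with a stub in place of the real vmread()-backed routine (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define VMX_EXIT_INFO_HAVE_RIP          0x1
#define VMX_EXIT_INFO_HAVE_REASON       0x2
#define VMX_EXIT_INFO_COMPLETE \
        (VMX_EXIT_INFO_HAVE_RIP | VMX_EXIT_INFO_HAVE_REASON)

/* Stub standing in for the real vmread()-backed vmx_get_exit_info(). */
static int
fake_get_exit_info(uint64_t *rip, uint64_t *exit_reason)
{
        *rip = 0x1000;
        *exit_reason = 30;      /* hypothetical I/O exit */
        return (VMX_EXIT_INFO_COMPLETE);
}

int
main(void)
{
        uint64_t rip, reason;
        int info = fake_get_exit_info(&rip, &reason);

        if ((info & VMX_EXIT_INFO_COMPLETE) != VMX_EXIT_INFO_COMPLETE) {
                printf("incomplete exit info\n");
                return (1);
        }
        printf("rip 0x%llx reason %llu\n",
            (unsigned long long)rip, (unsigned long long)reason);
        return (0);
}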
4472 | |
4473 | /* |
4474 | * svm_handle_exit |
4475 | * |
4476 | * Handle exits from the VM by decoding the exit reason and calling various |
4477 | * subhandlers as needed. |
4478 | */ |
4479 | int |
4480 | svm_handle_exit(struct vcpu *vcpu) |
4481 | { |
4482 | uint64_t exit_reason, rflags; |
4483 | int update_rip, ret = 0; |
4484 | struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va; |
4485 | |
4486 | update_rip = 0; |
4487 | exit_reason = vcpu->vc_gueststate.vg_exit_reason; |
4488 | rflags = vcpu->vc_gueststate.vg_rflags; |
4489 | |
4490 | switch (exit_reason) { |
4491 | case SVM_VMEXIT_VINTR0x64: |
4492 | if (!(rflags & PSL_I0x00000200)) { |
4493 | DPRINTF("%s: impossible interrupt window exit " |
4494 | "config\n", __func__); |
4495 | ret = EINVAL22; |
4496 | break; |
4497 | } |
4498 | |
4499 | /* |
4500 | * Guest is now ready for interrupts, so disable interrupt |
4501 | * window exiting. |
4502 | */ |
4503 | vmcb->v_irq = 0; |
4504 | vmcb->v_intr_vector = 0; |
4505 | vmcb->v_intercept1 &= ~SVM_INTERCEPT_VINTR(1UL << 4); |
4506 | svm_set_dirty(vcpu, SVM_CLEANBITS_TPR(1 << 3) | SVM_CLEANBITS_I(1 << 0)); |
4507 | |
4508 | update_rip = 0; |
4509 | break; |
4510 | case SVM_VMEXIT_INTR0x60: |
4511 | update_rip = 0; |
4512 | break; |
4513 | case SVM_VMEXIT_SHUTDOWN0x7F: |
4514 | update_rip = 0; |
4515 | ret = EAGAIN35; |
4516 | break; |
4517 | case SVM_VMEXIT_NPF0x400: |
4518 | ret = svm_handle_np_fault(vcpu); |
4519 | break; |
4520 | case SVM_VMEXIT_CPUID0x72: |
4521 | ret = vmm_handle_cpuid(vcpu); |
4522 | update_rip = 1; |
4523 | break; |
4524 | case SVM_VMEXIT_MSR0x7C: |
4525 | ret = svm_handle_msr(vcpu); |
4526 | update_rip = 1; |
4527 | break; |
4528 | case SVM_VMEXIT_XSETBV0x8D: |
4529 | ret = svm_handle_xsetbv(vcpu); |
4530 | update_rip = 1; |
4531 | break; |
4532 | case SVM_VMEXIT_IOIO0x7B: |
4533 | if (svm_handle_inout(vcpu) == 0) |
4534 | ret = EAGAIN35; |
4535 | break; |
4536 | case SVM_VMEXIT_HLT0x78: |
4537 | ret = svm_handle_hlt(vcpu); |
4538 | update_rip = 1; |
4539 | break; |
4540 | case SVM_VMEXIT_MWAIT0x8B: |
4541 | case SVM_VMEXIT_MWAIT_CONDITIONAL0x8C: |
4542 | case SVM_VMEXIT_MONITOR0x8A: |
4543 | case SVM_VMEXIT_VMRUN0x80: |
4544 | case SVM_VMEXIT_VMMCALL0x81: |
4545 | case SVM_VMEXIT_VMLOAD0x82: |
4546 | case SVM_VMEXIT_VMSAVE0x83: |
4547 | case SVM_VMEXIT_STGI0x84: |
4548 | case SVM_VMEXIT_CLGI0x85: |
4549 | case SVM_VMEXIT_SKINIT0x86: |
4550 | case SVM_VMEXIT_RDTSCP0x87: |
4551 | case SVM_VMEXIT_ICEBP0x88: |
4552 | case SVM_VMEXIT_INVLPGA0x7A: |
4553 | ret = vmm_inject_ud(vcpu); |
4554 | update_rip = 0; |
4555 | break; |
4556 | default: |
4557 | DPRINTF("%s: unhandled exit 0x%llx (pa=0x%llx)\n", __func__, |
4558 | exit_reason, (uint64_t)vcpu->vc_control_pa); |
4559 | return (EINVAL22); |
4560 | } |
4561 | |
4562 | if (update_rip) { |
4563 | vmcb->v_rip = vcpu->vc_gueststate.vg_rip; |
4564 | |
4565 | if (rflags & PSL_T0x00000100) { |
4566 | if (vmm_inject_db(vcpu)) { |
4567 | printf("%s: can't inject #DB exception to " |
4568 | "guest", __func__); |
4569 | return (EINVAL22); |
4570 | } |
4571 | } |
4572 | } |
4573 | |
4574 | /* Enable SVME in EFER (must always be set) */ |
4575 | vmcb->v_efer |= EFER_SVME0x00001000; |
4576 | svm_set_dirty(vcpu, SVM_CLEANBITS_CR(1 << 5)); |
4577 | |
4578 | return (ret); |
4579 | } |
4580 | |
4581 | /* |
4582 | * vmx_handle_exit |
4583 | * |
4584 | * Handle exits from the VM by decoding the exit reason and calling various |
4585 | * subhandlers as needed. |
4586 | */ |
4587 | int |
4588 | vmx_handle_exit(struct vcpu *vcpu) |
4589 | { |
4590 | uint64_t exit_reason, rflags, istate; |
4591 | int update_rip, ret = 0; |
4592 | |
4593 | update_rip = 0; |
4594 | exit_reason = vcpu->vc_gueststate.vg_exit_reason; |
4595 | rflags = vcpu->vc_gueststate.vg_rflags; |
4596 | |
4597 | switch (exit_reason) { |
4598 | case VMX_EXIT_INT_WINDOW7: |
4599 | if (!(rflags & PSL_I0x00000200)) { |
4600 | DPRINTF("%s: impossible interrupt window exit " |
4601 | "config\n", __func__); |
4602 | ret = EINVAL22; |
4603 | break; |
4604 | } |
4605 | |
4606 | ret = EAGAIN35; |
4607 | update_rip = 0; |
4608 | break; |
4609 | case VMX_EXIT_EPT_VIOLATION48: |
4610 | ret = vmx_handle_np_fault(vcpu); |
4611 | break; |
4612 | case VMX_EXIT_CPUID10: |
4613 | ret = vmm_handle_cpuid(vcpu); |
4614 | update_rip = 1; |
4615 | break; |
4616 | case VMX_EXIT_IO30: |
4617 | if (vmx_handle_inout(vcpu) == 0) |
4618 | ret = EAGAIN35; |
4619 | break; |
4620 | case VMX_EXIT_EXTINT1: |
4621 | vmx_handle_intr(vcpu); |
4622 | update_rip = 0; |
4623 | break; |
4624 | case VMX_EXIT_CR_ACCESS28: |
4625 | ret = vmx_handle_cr(vcpu); |
4626 | update_rip = 1; |
4627 | break; |
4628 | case VMX_EXIT_HLT12: |
4629 | ret = vmx_handle_hlt(vcpu); |
4630 | update_rip = 1; |
4631 | break; |
4632 | case VMX_EXIT_RDMSR31: |
4633 | ret = vmx_handle_rdmsr(vcpu); |
4634 | update_rip = 1; |
4635 | break; |
4636 | case VMX_EXIT_WRMSR32: |
4637 | ret = vmx_handle_wrmsr(vcpu); |
4638 | update_rip = 1; |
4639 | break; |
4640 | case VMX_EXIT_XSETBV55: |
4641 | ret = vmx_handle_xsetbv(vcpu); |
4642 | update_rip = 1; |
4643 | break; |
4644 | case VMX_EXIT_MWAIT36: |
4645 | case VMX_EXIT_MONITOR39: |
4646 | case VMX_EXIT_VMXON27: |
4647 | case VMX_EXIT_VMWRITE25: |
4648 | case VMX_EXIT_VMREAD23: |
4649 | case VMX_EXIT_VMLAUNCH20: |
4650 | case VMX_EXIT_VMRESUME24: |
4651 | case VMX_EXIT_VMPTRLD21: |
4652 | case VMX_EXIT_VMPTRST22: |
4653 | case VMX_EXIT_VMCLEAR19: |
4654 | case VMX_EXIT_VMCALL18: |
4655 | case VMX_EXIT_VMFUNC59: |
4656 | case VMX_EXIT_VMXOFF26: |
4657 | case VMX_EXIT_INVVPID53: |
4658 | case VMX_EXIT_INVEPT50: |
4659 | ret = vmm_inject_ud(vcpu); |
4660 | update_rip = 0; |
4661 | break; |
4662 | case VMX_EXIT_TRIPLE_FAULT2: |
4663 | #ifdef VMM_DEBUG |
4664 | DPRINTF("%s: vm %d vcpu %d triple fault\n", __func__, |
4665 | vcpu->vc_parent->vm_id, vcpu->vc_id); |
4666 | vmx_vcpu_dump_regs(vcpu); |
4667 | dump_vcpu(vcpu); |
4668 | vmx_dump_vmcs(vcpu); |
4669 | #endif /* VMM_DEBUG */ |
4670 | ret = EAGAIN35; |
4671 | update_rip = 0; |
4672 | break; |
4673 | default: |
4674 | #ifdef VMM_DEBUG |
4675 | DPRINTF("%s: unhandled exit 0x%llx (%s)\n", __func__, |
4676 | exit_reason, vmx_exit_reason_decode(exit_reason)); |
4677 | #endif /* VMM_DEBUG */ |
4678 | return (EINVAL22); |
4679 | } |
4680 | |
4681 | if (update_rip) { |
4682 | if (vmwrite(VMCS_GUEST_IA32_RIP0x681E, |
4683 | vcpu->vc_gueststate.vg_rip)) { |
4684 | printf("%s: can't advance rip\n", __func__); |
4685 | return (EINVAL22); |
4686 | } |
4687 | |
4688 | if (vmread(VMCS_GUEST_INTERRUPTIBILITY_ST0x4824, |
4689 | &istate)) { |
4690 | printf("%s: can't read interruptibility state\n", |
4691 | __func__); |
4692 | return (EINVAL22); |
4693 | } |
4694 | |
4695 | /* Clear bits 0 and 1, which cover blocking by STI and by MOV SS */ |
4696 | istate &= ~0x3; |
4697 | |
4698 | if (vmwrite(VMCS_GUEST_INTERRUPTIBILITY_ST0x4824, |
4699 | istate)) { |
4700 | printf("%s: can't write interruptibility state\n", |
4701 | __func__); |
4702 | return (EINVAL22); |
4703 | } |
4704 | |
4705 | if (rflags & PSL_T0x00000100) { |
4706 | if (vmm_inject_db(vcpu)) { |
4707 | printf("%s: can't inject #DB exception to " |
4708 | "guest", __func__); |
4709 | return (EINVAL22); |
4710 | } |
4711 | } |
4712 | } |
4713 | |
4714 | return (ret); |
4715 | } |
4716 | |
4717 | /* |
4718 | * vmm_inject_gp |
4719 | * |
4720 | * Injects an #GP exception into the guest VCPU. |
4721 | * |
4722 | * Parameters: |
4723 | * vcpu: vcpu to inject into |
4724 | * |
4725 | * Return values: |
4726 | * Always 0 |
4727 | */ |
4728 | int |
4729 | vmm_inject_gp(struct vcpu *vcpu) |
4730 | { |
4731 | DPRINTF("%s: injecting #GP at guest %%rip 0x%llx\n", __func__, |
4732 | vcpu->vc_gueststate.vg_rip); |
4733 | vcpu->vc_event = VMM_EX_GP13; |
4734 | |
4735 | return (0); |
4736 | } |
4737 | |
4738 | /* |
4739 | * vmm_inject_ud |
4740 | * |
4741 | * Injects an #UD exception into the guest VCPU. |
4742 | * |
4743 | * Parameters: |
4744 | * vcpu: vcpu to inject into |
4745 | * |
4746 | * Return values: |
4747 | * Always 0 |
4748 | */ |
4749 | int |
4750 | vmm_inject_ud(struct vcpu *vcpu) |
4751 | { |
4752 | DPRINTF("%s: injecting #UD at guest %%rip 0x%llx\n", __func__, |
4753 | vcpu->vc_gueststate.vg_rip); |
4754 | vcpu->vc_event = VMM_EX_UD6; |
4755 | |
4756 | return (0); |
4757 | } |
4758 | |
4759 | /* |
4760 | * vmm_inject_db |
4761 | * |
4762 | * Injects a #DB exception into the guest VCPU. |
4763 | * |
4764 | * Parameters: |
4765 | * vcpu: vcpu to inject into |
4766 | * |
4767 | * Return values: |
4768 | * Always 0 |
4769 | */ |
4770 | int |
4771 | vmm_inject_db(struct vcpu *vcpu) |
4772 | { |
4773 | DPRINTF("%s: injecting #DB at guest %%rip 0x%llx\n", __func__, |
4774 | vcpu->vc_gueststate.vg_rip); |
4775 | vcpu->vc_event = VMM_EX_DB1; |
4776 | |
4777 | return (0); |
4778 | } |
4779 | |
4780 | /* |
4781 | * vmm_get_guest_memtype |
4782 | * |
4783 | * Returns the type of memory 'gpa' refers to in the context of vm 'vm' |
4784 | */ |
4785 | int |
4786 | vmm_get_guest_memtype(struct vm *vm, paddr_t gpa) |
4787 | { |
4788 | int i; |
4789 | struct vm_mem_range *vmr; |
4790 | |
4791 | /* XXX Use binary search? */ |
4792 | for (i = 0; i < vm->vm_nmemranges; i++) { |
4793 | vmr = &vm->vm_memranges[i]; |
4794 | |
4795 | /* |
4796 | * vm_memranges are sorted in ascending order, so once gpa falls below |
4797 | * the start of this range it cannot be in this or any later range. |
4798 | */ |
4799 | if (gpa < vmr->vmr_gpa) |
4800 | break; |
4801 | |
4802 | if (gpa < vmr->vmr_gpa + vmr->vmr_size) { |
4803 | if (vmr->vmr_type == VM_MEM_MMIO2) |
4804 | return (VMM_MEM_TYPE_MMIO); |
4805 | return (VMM_MEM_TYPE_REGULAR); |
4806 | } |
4807 | } |
4808 | |
4809 | DPRINTF("guest memtype @ 0x%llx unknown\n", (uint64_t)gpa); |
4810 | return (VMM_MEM_TYPE_UNKNOWN); |
4811 | } |
4812 | |
4813 | /* |
4814 | * vmx_get_exit_qualification |
4815 | * |
4816 | * Return the current VMCS' exit qualification information |
4817 | */ |
4818 | int |
4819 | vmx_get_exit_qualification(uint64_t *exit_qualification) |
4820 | { |
4821 | if (vmread(VMCS_GUEST_EXIT_QUALIFICATION0x6400, exit_qualification)) { |
4822 | printf("%s: can't extract exit qual\n", __func__); |
4823 | return (EINVAL22); |
4824 | } |
4825 | |
4826 | return (0); |
4827 | } |
4828 | |
4829 | /* |
4830 | * vmx_get_guest_faulttype |
4831 | * |
4832 | * Determines the type (R/W/X) of the last fault on the VCPU last run on |
4833 | * this PCPU. |
4834 | */ |
4835 | int |
4836 | vmx_get_guest_faulttype(void) |
4837 | { |
4838 | uint64_t exit_qual; |
4839 | uint64_t presentmask = IA32_VMX_EPT_FAULT_WAS_READABLE(1ULL << 3) | |
4840 | IA32_VMX_EPT_FAULT_WAS_WRITABLE(1ULL << 4) | IA32_VMX_EPT_FAULT_WAS_EXECABLE(1ULL << 5); |
4841 | vm_prot_t prot, was_prot; |
4842 | |
4843 | if (vmx_get_exit_qualification(&exit_qual)) |
4844 | return (-1); |
4845 | |
4846 | if ((exit_qual & presentmask) == 0) |
4847 | return VM_FAULT_INVALID((vm_fault_t) 0x0); |
4848 | |
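     | /* Recover the page's previous protection from the WAS_* bits and the |
     | * attempted access from the READ/WRITE/EXEC bits; an access type not |
     | * covered by the previous protection is reported as a protection fault. |
     | */ |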
4849 | was_prot = 0; |
4850 | if (exit_qual & IA32_VMX_EPT_FAULT_WAS_READABLE(1ULL << 3)) |
4851 | was_prot |= PROT_READ0x01; |
4852 | if (exit_qual & IA32_VMX_EPT_FAULT_WAS_WRITABLE(1ULL << 4)) |
4853 | was_prot |= PROT_WRITE0x02; |
4854 | if (exit_qual & IA32_VMX_EPT_FAULT_WAS_EXECABLE(1ULL << 5)) |
4855 | was_prot |= PROT_EXEC0x04; |
4856 | |
4857 | prot = 0; |
4858 | if (exit_qual & IA32_VMX_EPT_FAULT_READ(1ULL << 0)) |
4859 | prot = PROT_READ0x01; |
4860 | else if (exit_qual & IA32_VMX_EPT_FAULT_WRITE(1ULL << 1)) |
4861 | prot = PROT_WRITE0x02; |
4862 | else if (exit_qual & IA32_VMX_EPT_FAULT_EXEC(1ULL << 2)) |
4863 | prot = PROT_EXEC0x04; |
4864 | |
4865 | if ((was_prot & prot) == 0) |
4866 | return VM_FAULT_PROTECT((vm_fault_t) 0x1); |
4867 | |
4868 | return (-1); |
4869 | } |
4870 | |
4871 | /* |
4872 | * svm_get_guest_faulttype |
4873 | * |
4874 | * Determines the type (R/W/X) of the last fault on the VCPU last run on |
4875 | * this PCPU. |
4876 | */ |
4877 | int |
4878 | svm_get_guest_faulttype(struct vmcb *vmcb) |
4879 | { |
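     | /* Bit 0 of EXITINFO1 is the page-fault error code's P bit: clear means |
     | * the guest page was simply not present, set means a protection fault. |
     | */ |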
4880 | if (!(vmcb->v_exitinfo1 & 0x1)) |
4881 | return VM_FAULT_INVALID((vm_fault_t) 0x0); |
4882 | return VM_FAULT_PROTECT((vm_fault_t) 0x1); |
4883 | } |
4884 | |
4885 | /* |
4886 | * svm_fault_page |
4887 | * |
4888 | * Request a new page to be faulted into the UVM map of the VM owning 'vcpu' |
4889 | * at address 'gpa'. |
4890 | */ |
4891 | int |
4892 | svm_fault_page(struct vcpu *vcpu, paddr_t gpa) |
4893 | { |
4894 | int ret; |
4895 | |
4896 | ret = uvm_fault(vcpu->vc_parent->vm_map, gpa, VM_FAULT_WIRE((vm_fault_t) 0x2), |
4897 | PROT_READ0x01 | PROT_WRITE0x02 | PROT_EXEC0x04); |
4898 | if (ret) |
4899 | printf("%s: uvm_fault returns %d, GPA=0x%llx, rip=0x%llx\n", |
4900 | __func__, ret, (uint64_t)gpa, vcpu->vc_gueststate.vg_rip); |
4901 | |
4902 | return (ret); |
4903 | } |
4904 | |
4905 | /* |
4906 | * svm_handle_np_fault |
4907 | * |
4908 | * High level nested paging handler for SVM. Verifies that a fault is for a |
4909 | * valid memory region, then faults a page, or aborts otherwise. |
4910 | */ |
4911 | int |
4912 | svm_handle_np_fault(struct vcpu *vcpu) |
4913 | { |
4914 | uint64_t gpa; |
4915 | int gpa_memtype, ret = 0; |
4916 | struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va; |
4917 | struct vm_exit_eptviolation *vee = &vcpu->vc_exit.vee; |
4918 | struct cpu_info *ci = curcpu(); |
4919 | |
4920 | memset(vee, 0, sizeof(*vee)); |
4921 | |
4922 | gpa = vmcb->v_exitinfo2; |
4923 | |
4924 | gpa_memtype = vmm_get_guest_memtype(vcpu->vc_parent, gpa); |
4925 | switch (gpa_memtype) { |
4926 | case VMM_MEM_TYPE_REGULAR: |
4927 | vee->vee_fault_type = VEE_FAULT_HANDLED; |
4928 | ret = svm_fault_page(vcpu, gpa); |
4929 | break; |
4930 | case VMM_MEM_TYPE_MMIO: |
4931 | vee->vee_fault_type = VEE_FAULT_MMIO_ASSIST; |
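     | /* With decode assist, the CPU provides the bytes of the faulting |
     | * instruction in the VMCB; copy them out so the MMIO access can be |
     | * emulated without re-fetching them from guest memory. |
     | */ |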
4932 | if (ci->ci_vmm_cap.vcc_svm.svm_decode_assist) { |
4933 | vee->vee_insn_len = vmcb->v_n_bytes_fetched; |
4934 | memcpy(&vee->vee_insn_bytes, vmcb->v_guest_ins_bytes, |
4935 | sizeof(vee->vee_insn_bytes)); |
4936 | vee->vee_insn_info |= VEE_BYTES_VALID0x2; |
4937 | } |
4938 | ret = EAGAIN35; |
4939 | break; |
4940 | default: |
4941 | printf("%s: unknown memory type %d for GPA 0x%llx\n", |
4942 | __func__, gpa_memtype, gpa); |
4943 | return (EINVAL22); |
4944 | } |
4945 | |
4946 | return (ret); |
4947 | } |
4948 | |
4949 | /* |
4950 | * vmx_fault_page |
4951 | * |
4952 | * Request a new page to be faulted into the UVM map of the VM owning 'vcpu' |
4953 | * at address 'gpa'. |
4954 | * |
4955 | * Parameters: |
4956 | * vcpu: guest VCPU requiring the page to be faulted into the UVM map |
4957 | * gpa: guest physical address that triggered the fault |
4958 | * |
4959 | * Return Values: |
4960 | * 0: if successful |
4961 | * EINVAL: if fault type could not be determined or VMCS reload fails |
4962 | * EAGAIN: if a protection fault occurred, ie writing to a read-only page |
4963 | * errno: if uvm_fault(9) fails to wire in the page |
4964 | */ |
4965 | int |
4966 | vmx_fault_page(struct vcpu *vcpu, paddr_t gpa) |
4967 | { |
4968 | int fault_type, ret; |
4969 | |
4970 | fault_type = vmx_get_guest_faulttype(); |
4971 | switch (fault_type) { |
4972 | case -1: |
4973 | printf("%s: invalid fault type\n", __func__); |
4974 | return (EINVAL22); |
4975 | case VM_FAULT_PROTECT((vm_fault_t) 0x1): |
4976 | vcpu->vc_exit.vee.vee_fault_type = VEE_FAULT_PROTECT; |
4977 | return (EAGAIN35); |
4978 | default: |
4979 | vcpu->vc_exit.vee.vee_fault_type = VEE_FAULT_HANDLED; |
4980 | break; |
4981 | } |
4982 | |
4983 | /* We may sleep during uvm_fault(9), so reload VMCS. */ |
4984 | vcpu->vc_last_pcpu = curcpu(); |
4985 | ret = uvm_fault(vcpu->vc_parent->vm_map, gpa, VM_FAULT_WIRE((vm_fault_t) 0x2), |
4986 | PROT_READ0x01 | PROT_WRITE0x02 | PROT_EXEC0x04); |
4987 | if (vcpu_reload_vmcs_vmx(vcpu)) { |
4988 | printf("%s: failed to reload vmcs\n", __func__); |
4989 | return (EINVAL22); |
4990 | } |
4991 | |
4992 | if (ret) |
4993 | printf("%s: uvm_fault returns %d, GPA=0x%llx, rip=0x%llx\n", |
4994 | __func__, ret, (uint64_t)gpa, vcpu->vc_gueststate.vg_rip); |
4995 | |
4996 | return (ret); |
4997 | } |
4998 | |
4999 | /* |
5000 | * vmx_handle_np_fault |
5001 | * |
5002 | * High level nested paging handler for VMX. Verifies that a fault is for a |
5003 | * valid memory region, then faults a page, or aborts otherwise. |
5004 | */ |
5005 | int |
5006 | vmx_handle_np_fault(struct vcpu *vcpu) |
5007 | { |
5008 | uint64_t insn_len = 0, gpa; |
5009 | int gpa_memtype, ret = 0; |
5010 | struct vm_exit_eptviolation *vee = &vcpu->vc_exit.vee; |
5011 | |
5012 | memset(vee, 0, sizeof(*vee)); |
5013 | |
5014 | if (vmread(VMCS_GUEST_PHYSICAL_ADDRESS0x2400, &gpa)) { |
5015 | printf("%s: cannot extract faulting pa\n", __func__); |
5016 | return (EINVAL22); |
5017 | } |
5018 | |
5019 | gpa_memtype = vmm_get_guest_memtype(vcpu->vc_parent, gpa); |
5020 | switch (gpa_memtype) { |
5021 | case VMM_MEM_TYPE_REGULAR: |
5022 | vee->vee_fault_type = VEE_FAULT_HANDLED; |
5023 | ret = vmx_fault_page(vcpu, gpa); |
5024 | break; |
5025 | case VMM_MEM_TYPE_MMIO: |
5026 | vee->vee_fault_type = VEE_FAULT_MMIO_ASSIST; |
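     | /* Forward the faulting instruction's length so the emulator can advance |
     | * %rip after handling the MMIO access; x86 instructions are at most 15 |
     | * bytes, so anything outside 1-15 is treated as an error. |
     | */ |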
5027 | if (vmread(VMCS_INSTRUCTION_LENGTH0x440C, &insn_len) || |
5028 | insn_len == 0 || insn_len > 15) { |
5029 | printf("%s: failed to extract instruction length\n", |
5030 | __func__); |
5031 | ret = EINVAL22; |
5032 | } else { |
5033 | vee->vee_insn_len = (uint32_t)insn_len; |
5034 | vee->vee_insn_info |= VEE_LEN_VALID0x1; |
5035 | ret = EAGAIN35; |
5036 | } |
5037 | break; |
5038 | default: |
5039 | printf("%s: unknown memory type %d for GPA 0x%llx\n", |
5040 | __func__, gpa_memtype, gpa); |
5041 | return (EINVAL22); |
5042 | } |
5043 | |
5044 | return (ret); |
5045 | } |
5046 | |
5047 | /* |
5048 | * vmm_get_guest_cpu_cpl |
5049 | * |
5050 | * Determines current CPL of 'vcpu'. On VMX/Intel, this is gathered from the |
5051 | * VMCS field for the DPL of SS (this seems odd, but is documented that way |
5052 | * in the SDM). For SVM/AMD, this is gathered directly from the VMCB's 'cpl' |
5053 | * field, as per the APM. |
5054 | * |
5055 | * Parameters: |
5056 | * vcpu: guest VCPU for which CPL is to be checked |
5057 | * |
5058 | * Return Values: |
5059 | * -1: the CPL could not be determined |
5060 | * 0-3 indicating the current CPL. For real mode operation, 0 is returned. |
5061 | */ |
5062 | int |
5063 | vmm_get_guest_cpu_cpl(struct vcpu *vcpu) |
5064 | { |
5065 | int mode; |
5066 | struct vmcb *vmcb; |
5067 | uint64_t ss_ar; |
5068 | |
5069 | mode = vmm_get_guest_cpu_mode(vcpu); |
5070 | |
5071 | if (mode == VMM_CPU_MODE_UNKNOWN) |
5072 | return (-1); |
5073 | |
5074 | if (mode == VMM_CPU_MODE_REAL) |
5075 | return (0); |
5076 | |
5077 | if (vmm_softc->mode == VMM_MODE_RVI) { |
5078 | vmcb = (struct vmcb *)vcpu->vc_control_va; |
5079 | return (vmcb->v_cpl); |
5080 | } else if (vmm_softc->mode == VMM_MODE_EPT) { |
5081 | if (vmread(VMCS_GUEST_IA32_SS_AR0x4818, &ss_ar)) |
5082 | return (-1); |
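     | /* The DPL lives in bits 5:6 of the segment access-rights field. */ |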
5083 | return ((ss_ar & 0x60) >> 5); |
5084 | } else |
5085 | return (-1); |
5086 | } |
5087 | |
5088 | /* |
5089 | * vmm_get_guest_cpu_mode |
5090 | * |
5091 | * Determines current CPU mode of 'vcpu'. |
5092 | * |
5093 | * Parameters: |
5094 | * vcpu: guest VCPU for which mode is to be checked |
5095 | * |
5096 | * Return Values: |
5097 | * One of VMM_CPU_MODE_*, or VMM_CPU_MODE_UNKNOWN if the mode could not be |
5098 | * ascertained. |
5099 | */ |
5100 | int |
5101 | vmm_get_guest_cpu_mode(struct vcpu *vcpu) |
5102 | { |
5103 | uint64_t cr0, efer, cs_ar; |
5104 | uint8_t l, dib; |
5105 | struct vmcb *vmcb; |
5106 | struct vmx_msr_store *msr_store; |
5107 | |
5108 | if (vmm_softc->mode == VMM_MODE_RVI) { |
5109 | vmcb = (struct vmcb *)vcpu->vc_control_va; |
5110 | cr0 = vmcb->v_cr0; |
5111 | efer = vmcb->v_efer; |
5112 | cs_ar = vmcb->v_cs.vs_attr; |
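     | /* The VMCB stores segment attributes in a packed format; expand it into |
     | * the VMX-style access-rights layout so the CS.L/CS.D checks below work |
     | * for both SVM and VMX. |
     | */ |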
5113 | cs_ar = (cs_ar & 0xff) | ((cs_ar << 4) & 0xf000); |
5114 | } else if (vmm_softc->mode == VMM_MODE_EPT) { |
5115 | if (vmread(VMCS_GUEST_IA32_CR00x6800, &cr0)) |
5116 | return (VMM_CPU_MODE_UNKNOWN); |
5117 | if (vmread(VMCS_GUEST_IA32_CS_AR0x4816, &cs_ar)) |
5118 | return (VMM_CPU_MODE_UNKNOWN); |
5119 | msr_store = |
5120 | (struct vmx_msr_store *)vcpu->vc_vmx_msr_exit_save_va; |
5121 | efer = msr_store[VCPU_REGS_EFER0].vms_data; |
5122 | } else |
5123 | return (VMM_CPU_MODE_UNKNOWN); |
5124 | |
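     | /* CS.L (bit 13) marks a 64-bit code segment; CS.D/B (bit 14) gives the |
     | * default operand size in protected mode. |
     | */ |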
5125 | l = (cs_ar & 0x2000) >> 13; |
5126 | dib = (cs_ar & 0x4000) >> 14; |
5127 | |
5128 | /* Check CR0.PE */ |
5129 | if (!(cr0 & CR0_PE0x00000001)) |
5130 | return (VMM_CPU_MODE_REAL); |
5131 | |
5132 | /* Check EFER */ |
5133 | if (efer & EFER_LMA0x00000400) { |
5134 | /* Could be compat or long mode, check CS.L */ |
5135 | if (l) |
5136 | return (VMM_CPU_MODE_LONG); |
5137 | else |
5138 | return (VMM_CPU_MODE_COMPAT); |
5139 | } |
5140 | |
5141 | /* Check prot vs prot32 */ |
5142 | if (dib) |
5143 | return (VMM_CPU_MODE_PROT32); |
5144 | else |
5145 | return (VMM_CPU_MODE_PROT); |
5146 | } |
5147 | |
5148 | /* |
5149 | * svm_handle_inout |
5150 | * |
5151 | * Exit handler for IN/OUT instructions. |
5152 | * |
5153 | * Parameters: |
5154 | * vcpu: The VCPU where the IN/OUT instruction occurred |
5155 | * |
5156 | * Return values: |
5157 | * 0: if successful |
5158 | * EINVAL: an invalid IN/OUT instruction was encountered |
5159 | */ |
5160 | int |
5161 | svm_handle_inout(struct vcpu *vcpu) |
5162 | { |
5163 | uint64_t insn_length, exit_qual; |
5164 | struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va; |
5165 | |
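     | /* On an IOIO intercept, EXITINFO2 holds the address of the instruction |
     | * following IN/OUT, so subtracting the saved %rip yields its length. |
     | */ |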
5166 | insn_length = vmcb->v_exitinfo2 - vmcb->v_rip; |
5167 | exit_qual = vmcb->v_exitinfo1; |
5168 | |
5169 | /* Bit 0 - direction */ |
5170 | if (exit_qual & 0x1) |
5171 | vcpu->vc_exit.vei.vei_dir = VEI_DIR_IN; |
5172 | else |
5173 | vcpu->vc_exit.vei.vei_dir = VEI_DIR_OUT; |
5174 | /* Bit 2 - string instruction? */ |
5175 | vcpu->vc_exit.vei.vei_string = (exit_qual & 0x4) >> 2; |
5176 | /* Bit 3 - REP prefix? */ |
5177 | vcpu->vc_exit.vei.vei_rep = (exit_qual & 0x8) >> 3; |
5178 | |
5179 | /* Bits 4:6 - size of access (SZ8/SZ16/SZ32) */ |
5180 | if (exit_qual & 0x10) |
5181 | vcpu->vc_exit.vei.vei_size = 1; |
5182 | else if (exit_qual & 0x20) |
5183 | vcpu->vc_exit.vei.vei_size = 2; |
5184 | else if (exit_qual & 0x40) |
5185 | vcpu->vc_exit.vei.vei_size = 4; |
5186 | |
5187 | /* Bits 16:31 - port */ |
5188 | vcpu->vc_exit.vei.vei_port = (exit_qual & 0xFFFF0000) >> 16; |
5189 | /* Data */ |
5190 | vcpu->vc_exit.vei.vei_data = vmcb->v_rax; |
5191 | |
5192 | vcpu->vc_exit.vei.vei_insn_len = (uint8_t)insn_length; |
5193 | |
5194 | TRACEPOINT(vmm, inout, vcpu, vcpu->vc_exit.vei.vei_port, |
5195 | vcpu->vc_exit.vei.vei_dir, vcpu->vc_exit.vei.vei_data); |
5196 | |
5197 | return (0); |
5198 | } |
5199 | |
5200 | /* |
5201 | * vmx_handle_inout |
5202 | * |
5203 | * Exit handler for IN/OUT instructions. |
5204 | * |
5205 | * Parameters: |
5206 | * vcpu: The VCPU where the IN/OUT instruction occurred |
5207 | * |
5208 | * Return values: |
5209 | * 0: if successful |
5210 | * EINVAL: invalid IN/OUT instruction or vmread failures occurred |
5211 | */ |
5212 | int |
5213 | vmx_handle_inout(struct vcpu *vcpu) |
5214 | { |
5215 | uint64_t insn_length, exit_qual; |
5216 | |
5217 | if (vmread(VMCS_INSTRUCTION_LENGTH0x440C, &insn_length)) { |
5218 | printf("%s: can't obtain instruction length\n", __func__); |
5219 | return (EINVAL22); |
5220 | } |
5221 | |
5222 | if (vmx_get_exit_qualification(&exit_qual)) { |
5223 | printf("%s: can't get exit qual\n", __func__); |
5224 | return (EINVAL22); |
5225 | } |
5226 | |
5227 | /* Bits 0:2 - size of access (value is size - 1) */ |
5228 | vcpu->vc_exit.vei.vei_size = (exit_qual & 0x7) + 1; |
5229 | /* Bit 3 - direction */ |
5230 | if ((exit_qual & 0x8) >> 3) |
5231 | vcpu->vc_exit.vei.vei_dir = VEI_DIR_IN; |
5232 | else |
5233 | vcpu->vc_exit.vei.vei_dir = VEI_DIR_OUT; |
5234 | /* Bit 4 - string instruction? */ |
5235 | vcpu->vc_exit.vei.vei_string = (exit_qual & 0x10) >> 4; |
5236 | /* Bit 5 - REP prefix? */ |
5237 | vcpu->vc_exit.vei.vei_rep = (exit_qual & 0x20) >> 5; |
5238 | /* Bit 6 - Operand encoding */ |
5239 | vcpu->vc_exit.vei.vei_encoding = (exit_qual & 0x40) >> 6; |
5241 | /* Bits 16:31 - port */ |
5241 | vcpu->vc_exit.vei.vei_port = (exit_qual & 0xFFFF0000) >> 16; |
5242 | /* Data */ |
5243 | vcpu->vc_exit.vei.vei_data = (uint32_t)vcpu->vc_gueststate.vg_rax; |
5244 | |
5245 | vcpu->vc_exit.vei.vei_insn_len = (uint8_t)insn_length; |
5246 | |
5247 | TRACEPOINT(vmm, inout, vcpu, vcpu->vc_exit.vei.vei_port, |
5248 | vcpu->vc_exit.vei.vei_dir, vcpu->vc_exit.vei.vei_data); |
5249 | |
5250 | return (0); |
5251 | } |
5252 | |
5253 | /* |
5254 | * vmx_load_pdptes |
5255 | * |
5256 | * Update the PDPTEs in the VMCS with the values currently indicated by the |
5257 | * guest CR3. This is used for 32-bit PAE guests when enabling paging. |
5258 | * |
5259 | * Parameters |
5260 | * vcpu: The vcpu whose PDPTEs should be loaded |
5261 | * |
5262 | * Return values: |
5263 | * 0: if successful |
5264 | * EINVAL: if the PDPTEs could not be loaded |
5265 | * ENOMEM: memory allocation failure |
5266 | */ |
5267 | int |
5268 | vmx_load_pdptes(struct vcpu *vcpu) |
5269 | { |
5270 | uint64_t cr3, cr3_host_phys; |
5271 | vaddr_t cr3_host_virt; |
5272 | pd_entry_t *pdptes; |
5273 | int ret; |
5274 | |
5275 | if (vmread(VMCS_GUEST_IA32_CR30x6802, &cr3)) { |
5276 | printf("%s: can't read guest cr3\n", __func__); |
5277 | return (EINVAL22); |
5278 | } |
5279 | |
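     | /* Translate the guest-physical CR3 through the nested page tables to |
     | * find the host page holding the guest's PDPTE table. |
     | */ |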
5280 | if (!pmap_extract(vcpu->vc_parent->vm_map->pmap, (vaddr_t)cr3, |
5281 | (paddr_t *)&cr3_host_phys)) { |
5282 | DPRINTF("%s: nonmapped guest CR3, setting PDPTEs to 0\n", |
5283 | __func__); |
5284 | if (vmwrite(VMCS_GUEST_PDPTE00x280A, 0)) { |
5285 | printf("%s: can't write guest PDPTE0\n", __func__); |
5286 | return (EINVAL22); |
5287 | } |
5288 | |
5289 | if (vmwrite(VMCS_GUEST_PDPTE10x280C, 0)) { |
5290 | printf("%s: can't write guest PDPTE1\n", __func__); |
5291 | return (EINVAL22); |
5292 | } |
5293 | |
5294 | if (vmwrite(VMCS_GUEST_PDPTE20x280E, 0)) { |
5295 | printf("%s: can't write guest PDPTE2\n", __func__); |
5296 | return (EINVAL22); |
5297 | } |
5298 | |
5299 | if (vmwrite(VMCS_GUEST_PDPTE30x2810, 0)) { |
5300 | printf("%s: can't write guest PDPTE3\n", __func__); |
5301 | return (EINVAL22); |
5302 | } |
5303 | return (0); |
5304 | } |
5305 | |
5306 | ret = 0; |
5307 | |
5308 | /* We may sleep during km_alloc(9), so reload VMCS. */ |
5309 | vcpu->vc_last_pcpu = curcpu(); |
5310 | cr3_host_virt = (vaddr_t)km_alloc(PAGE_SIZE(1 << 12), &kv_any, &kp_none, |
5311 | &kd_waitok); |
5312 | if (vcpu_reload_vmcs_vmx(vcpu)) { |
5313 | printf("%s: failed to reload vmcs\n", __func__); |
5314 | ret = EINVAL22; |
5315 | goto exit; |
5316 | } |
5317 | |
5318 | if (!cr3_host_virt) { |
5319 | printf("%s: can't allocate address for guest CR3 mapping\n", |
5320 | __func__); |
5321 | return (ENOMEM12); |
5322 | } |
5323 | |
5324 | pmap_kenter_pa(cr3_host_virt, cr3_host_phys, PROT_READ0x01); |
5325 | |
5326 | pdptes = (pd_entry_t *)cr3_host_virt; |
5327 | if (vmwrite(VMCS_GUEST_PDPTE00x280A, pdptes[0])) { |
5328 | printf("%s: can't write guest PDPTE0\n", __func__); |
5329 | ret = EINVAL22; |
5330 | goto exit; |
5331 | } |
5332 | |
5333 | if (vmwrite(VMCS_GUEST_PDPTE10x280C, pdptes[1])) { |
5334 | printf("%s: can't write guest PDPTE1\n", __func__); |
5335 | ret = EINVAL22; |
5336 | goto exit; |
5337 | } |
5338 | |
5339 | if (vmwrite(VMCS_GUEST_PDPTE20x280E, pdptes[2])) { |
5340 | printf("%s: can't write guest PDPTE2\n", __func__); |
5341 | ret = EINVAL22; |
5342 | goto exit; |
5343 | } |
5344 | |
5345 | if (vmwrite(VMCS_GUEST_PDPTE30x2810, pdptes[3])) { |
5346 | printf("%s: can't write guest PDPTE3\n", __func__); |
5347 | ret = EINVAL22; |
5348 | goto exit; |
5349 | } |
5350 | |
5351 | exit: |
5352 | pmap_kremove(cr3_host_virt, PAGE_SIZE(1 << 12)); |
5353 | |
5354 | /* km_free(9) might sleep, so we need to reload VMCS. */ |
5355 | vcpu->vc_last_pcpu = curcpu(); |
5356 | km_free((void *)cr3_host_virt, PAGE_SIZE(1 << 12), &kv_any, &kp_none); |
5357 | if (vcpu_reload_vmcs_vmx(vcpu)) { |
5358 | printf("%s: failed to reload vmcs after km_free\n", __func__); |
5359 | ret = EINVAL22; |
5360 | } |
5361 | |
5362 | return (ret); |
5363 | } |
5364 | |
5365 | /* |
5366 | * vmx_handle_cr0_write |
5367 | * |
5368 | * Write handler for CR0. This function ensures valid values are written into |
5369 | * CR0 for the cpu/vmm mode in use (cr0 must-be-0 and must-be-1 bits, etc). |
5370 | * |
5371 | * Parameters |
5372 | * vcpu: The vcpu taking the cr0 write exit |
5373 | * r: The guest's desired (incoming) cr0 value |
5374 | * |
5375 | * Return values: |
5376 | * 0: if successful |
5377 | * EINVAL: if an error occurred |
5378 | */ |
5379 | int |
5380 | vmx_handle_cr0_write(struct vcpu *vcpu, uint64_t r) |
5381 | { |
5382 | struct vmx_msr_store *msr_store; |
5383 | struct vmx_invvpid_descriptor vid; |
5384 | uint64_t ectls, oldcr0, cr4, mask; |
5385 | int ret; |
5386 | |
5387 | /* Check must-be-0 bits */ |
5388 | mask = vcpu->vc_vmx_cr0_fixed1; |
5389 | if (~r & mask) { |
5390 | /* Inject #GP, let the guest handle it */ |
5391 | DPRINTF("%s: guest set invalid bits in %%cr0. Zeros " |
5392 | "mask=0x%llx, data=0x%llx\n", __func__, |
5393 | vcpu->vc_vmx_cr0_fixed1, r); |
5394 | vmm_inject_gp(vcpu); |
5395 | return (0); |
5396 | } |
5397 | |
5398 | /* Check must-be-1 bits */ |
5399 | mask = vcpu->vc_vmx_cr0_fixed0; |
5400 | if ((r & mask) != mask) { |
5401 | /* Inject #GP, let the guest handle it */ |
5402 | DPRINTF("%s: guest set invalid bits in %%cr0. Ones " |
5403 | "mask=0x%llx, data=0x%llx\n", __func__, |
5404 | vcpu->vc_vmx_cr0_fixed0, r); |
5405 | vmm_inject_gp(vcpu); |
5406 | return (0); |
5407 | } |
5408 | |
5409 | if (r & 0xFFFFFFFF00000000ULL) { |
5410 | DPRINTF("%s: setting bits 63:32 of %%cr0 is invalid," |
5411 | " inject #GP, cr0=0x%llx\n", __func__, r); |
5412 | vmm_inject_gp(vcpu); |
5413 | return (0); |
5414 | } |
5415 | |
5416 | if ((r & CR0_PG0x80000000) && (r & CR0_PE0x00000001) == 0) { |
5417 | DPRINTF("%s: PG flag set when the PE flag is clear," |
5418 | " inject #GP, cr0=0x%llx\n", __func__, r); |
5419 | vmm_inject_gp(vcpu); |
5420 | return (0); |
5421 | } |
5422 | |
5423 | if ((r & CR0_NW0x20000000) && (r & CR0_CD0x40000000) == 0) { |
5424 | DPRINTF("%s: NW flag set when the CD flag is clear," |
5425 | " inject #GP, cr0=0x%llx\n", __func__, r); |
5426 | vmm_inject_gp(vcpu); |
5427 | return (0); |
5428 | } |
5429 | |
5430 | if (vmread(VMCS_GUEST_IA32_CR00x6800, &oldcr0)) { |
5431 | printf("%s: can't read guest cr0\n", __func__); |
5432 | return (EINVAL22); |
5433 | } |
5434 | |
5435 | /* CR0 must always have NE set */ |
5436 | r |= CR0_NE0x00000020; |
5437 | |
5438 | if (vmwrite(VMCS_GUEST_IA32_CR00x6800, r)) { |
5439 | printf("%s: can't write guest cr0\n", __func__); |
5440 | return (EINVAL22); |
5441 | } |
5442 | |
5443 | /* If the guest is turning paging off (PG was previously set) ... */ |
5444 | if (!(r & CR0_PG0x80000000) && (oldcr0 & CR0_PG0x80000000)) { |
5445 | /* Paging was disabled (prev. enabled) - Flush TLB */ |
5446 | if (vmm_softc->mode == VMM_MODE_EPT && |
5447 | vcpu->vc_vmx_vpid_enabled) { |
5448 | vid.vid_vpid = vcpu->vc_vpid; |
5449 | vid.vid_addr = 0; |
5450 | invvpid(IA32_VMX_INVVPID_SINGLE_CTX_GLB0x3, &vid); |
5451 | } |
5452 | } else if (!(oldcr0 & CR0_PG0x80000000) && (r & CR0_PG0x80000000)) { |
5453 | /* |
5454 | * The guest has just enabled paging, so the IA32_VMX_IA32E_MODE_GUEST |
5455 | * entry control must be set to match EFER_LME. |
5456 | */ |
5457 | msr_store = (struct vmx_msr_store *)vcpu->vc_vmx_msr_exit_save_va; |
5458 | |
5459 | if (vmread(VMCS_ENTRY_CTLS0x4012, &ectls)) { |
5460 | printf("%s: can't read entry controls", __func__); |
5461 | return (EINVAL22); |
5462 | } |
5463 | |
5464 | if (msr_store[VCPU_REGS_EFER0].vms_data & EFER_LME0x00000100) |
5465 | ectls |= IA32_VMX_IA32E_MODE_GUEST(1ULL << 9); |
5466 | else |
5467 | ectls &= ~IA32_VMX_IA32E_MODE_GUEST(1ULL << 9); |
5468 | |
5469 | if (vmwrite(VMCS_ENTRY_CTLS0x4012, ectls)) { |
5470 | printf("%s: can't write entry controls", __func__); |
5471 | return (EINVAL22); |
5472 | } |
5473 | |
5474 | if (vmread(VMCS_GUEST_IA32_CR40x6804, &cr4)) { |
5475 | printf("%s: can't read guest cr4\n", __func__); |
5476 | return (EINVAL22); |
5477 | } |
5478 | |
5479 | /* Load PDPTEs if PAE guest enabling paging */ |
5480 | if (cr4 & CR4_PAE0x00000020) { |
5481 | ret = vmx_load_pdptes(vcpu); |
5482 | |
5483 | if (ret) { |
5484 | printf("%s: updating PDPTEs failed\n", __func__); |
5485 | return (ret); |
5486 | } |
5487 | } |
5488 | } |
5489 | |
5490 | return (0); |
5491 | } |
5492 | |
5493 | /* |
5494 | * vmx_handle_cr4_write |
5495 | * |
5496 | * Write handler for CR4. This function ensures valid values are written into |
5497 | * CR4 for the cpu/vmm mode in use (cr4 must-be-0 and must-be-1 bits, etc). |
5498 | * |
5499 | * Parameters |
5500 | * vcpu: The vcpu taking the cr4 write exit |
5501 | * r: The guest's desired (incoming) cr4 value |
5502 | * |
5503 | * Return values: |
5504 | * 0: if successful |
5505 | * EINVAL: if an error occurred |
5506 | */ |
5507 | int |
5508 | vmx_handle_cr4_write(struct vcpu *vcpu, uint64_t r) |
5509 | { |
5510 | uint64_t mask; |
5511 | |
5512 | /* Check must-be-0 bits */ |
5513 | mask = ~(curcpu()->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed1); |
5514 | if (r & mask) { |
5515 | /* Inject #GP, let the guest handle it */ |
5516 | DPRINTF("%s: guest set invalid bits in %%cr4. Zeros " |
5517 | "mask=0x%llx, data=0x%llx\n", __func__, |
5518 | curcpu()->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed1, |
5519 | r); |
5520 | vmm_inject_gp(vcpu); |
5521 | return (0); |
5522 | } |
5523 | |
5524 | /* Check must-be-1 bits */ |
5525 | mask = curcpu()->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed0; |
5526 | if ((r & mask) != mask) { |
5527 | /* Inject #GP, let the guest handle it */ |
5528 | DPRINTF("%s: guest set invalid bits in %%cr4. Ones " |
5529 | "mask=0x%llx, data=0x%llx\n", __func__, |
5530 | curcpu()->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed0, |
5531 | r); |
5532 | vmm_inject_gp(vcpu); |
5533 | return (0); |
5534 | } |
5535 | |
5536 | /* CR4_VMXE must always be enabled */ |
5537 | r |= CR4_VMXE0x00002000; |
5538 | |
5539 | if (vmwrite(VMCS_GUEST_IA32_CR40x6804, r)) { |
5540 | printf("%s: can't write guest cr4\n", __func__); |
5541 | return (EINVAL22); |
5542 | } |
5543 | |
5544 | return (0); |
5545 | } |
5546 | |
5547 | /* |
5548 | * vmx_handle_cr |
5549 | * |
5550 | * Handle reads/writes to control registers (except CR3) |
5551 | */ |
5552 | int |
5553 | vmx_handle_cr(struct vcpu *vcpu) |
5554 | { |
5555 | uint64_t insn_length, exit_qual, r; |
5556 | uint8_t crnum, dir, reg; |
5557 | |
5558 | if (vmread(VMCS_INSTRUCTION_LENGTH0x440C, &insn_length)) { |
5559 | printf("%s: can't obtain instruction length\n", __func__); |
5560 | return (EINVAL22); |
5561 | } |
5562 | |
5563 | if (vmx_get_exit_qualification(&exit_qual)) { |
5564 | printf("%s: can't get exit qual\n", __func__); |
5565 | return (EINVAL22); |
5566 | } |
5567 | |
5568 | /* Low 4 bits of exit_qual represent the CR number */ |
5569 | crnum = exit_qual & 0xf; |
5570 | |
5571 | /* |
5572 | * Bits 5:4 indicate the direction of operation (or special CR-modifying |
5573 | * instruction) |
5574 | */ |
5575 | dir = (exit_qual & 0x30) >> 4; |
5576 | |
5577 | /* Bits 11:8 encode the source/target register */ |
5578 | reg = (exit_qual & 0xf00) >> 8; |
5579 | |
5580 | switch (dir) { |
5581 | case CR_WRITE0: |
5582 | if (crnum == 0 || crnum == 4) { |
5583 | switch (reg) { |
5584 | case 0: r = vcpu->vc_gueststate.vg_rax; break; |
5585 | case 1: r = vcpu->vc_gueststate.vg_rcx; break; |
5586 | case 2: r = vcpu->vc_gueststate.vg_rdx; break; |
5587 | case 3: r = vcpu->vc_gueststate.vg_rbx; break; |
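     | /* %rsp is not cached in vc_gueststate, read it from the VMCS */ |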
5588 | case 4: if (vmread(VMCS_GUEST_IA32_RSP0x681C, &r)) { |
5589 | printf("%s: unable to read guest " |
5590 | "RSP\n", __func__); |
5591 | return (EINVAL22); |
5592 | } |
5593 | break; |
5594 | case 5: r = vcpu->vc_gueststate.vg_rbp; break; |
5595 | case 6: r = vcpu->vc_gueststate.vg_rsi; break; |
5596 | case 7: r = vcpu->vc_gueststate.vg_rdi; break; |
5597 | case 8: r = vcpu->vc_gueststate.vg_r8; break; |
5598 | case 9: r = vcpu->vc_gueststate.vg_r9; break; |
5599 | case 10: r = vcpu->vc_gueststate.vg_r10; break; |
5600 | case 11: r = vcpu->vc_gueststate.vg_r11; break; |
5601 | case 12: r = vcpu->vc_gueststate.vg_r12; break; |
5602 | case 13: r = vcpu->vc_gueststate.vg_r13; break; |
5603 | case 14: r = vcpu->vc_gueststate.vg_r14; break; |
5604 | case 15: r = vcpu->vc_gueststate.vg_r15; break; |
5605 | } |
5606 | DPRINTF("%s: mov to cr%d @ %llx, data=0x%llx\n", |
5607 | __func__, crnum, vcpu->vc_gueststate.vg_rip, r); |
5608 | } |
5609 | |
5610 | if (crnum == 0) |
5611 | vmx_handle_cr0_write(vcpu, r); |
5612 | |
5613 | if (crnum == 4) |
5614 | vmx_handle_cr4_write(vcpu, r); |
5615 | |
5616 | break; |
5617 | case CR_READ1: |
5618 | DPRINTF("%s: mov from cr%d @ %llx\n", __func__, crnum, |
5619 | vcpu->vc_gueststate.vg_rip); |
5620 | break; |
5621 | case CR_CLTS2: |
5622 | DPRINTF("%s: clts instruction @ %llx\n", __func__, |
5623 | vcpu->vc_gueststate.vg_rip); |
5624 | break; |
5625 | case CR_LMSW3: |
5626 | DPRINTF("%s: lmsw instruction @ %llx\n", __func__, |
5627 | vcpu->vc_gueststate.vg_rip); |
5628 | break; |
5629 | default: |
5630 | DPRINTF("%s: unknown cr access @ %llx\n", __func__, |
5631 | vcpu->vc_gueststate.vg_rip); |
5632 | } |
5633 | |
5634 | vcpu->vc_gueststate.vg_rip += insn_length; |
5635 | |
5636 | return (0); |
5637 | } |
5638 | |
5639 | /* |
5640 | * vmx_handle_rdmsr |
5641 | * |
5642 | * Handler for rdmsr instructions. Bitmap MSRs are allowed implicit access |
5643 | * and won't end up here. This handler is primarily intended to catch otherwise |
5644 | * unknown MSR access for possible later inclusion in the bitmap list. For |
5645 | * each MSR access that ends up here, we log the access (when VMM_DEBUG is |
5646 | * enabled). |
5647 | * |
5648 | * Parameters: |
5649 | * vcpu: vcpu structure containing instruction info causing the exit |
5650 | * |
5651 | * Return value: |
5652 | * 0: The operation was successful |
5653 | * EINVAL: An error occurred |
5654 | */ |
5655 | int |
5656 | vmx_handle_rdmsr(struct vcpu *vcpu) |
5657 | { |
5658 | uint64_t insn_length; |
5659 | uint64_t *rax, *rdx; |
5660 | uint64_t *rcx; |
5661 | int ret; |
5662 | |
5663 | if (vmread(VMCS_INSTRUCTION_LENGTH0x440C, &insn_length)) { |
5664 | printf("%s: can't obtain instruction length\n", __func__); |
5665 | return (EINVAL22); |
5666 | } |
5667 | |
5668 | if (insn_length != 2) { |
5669 | DPRINTF("%s: RDMSR with instruction length %lld not " |
5670 | "supported\n", __func__, insn_length); |
5671 | return (EINVAL22); |
5672 | } |
5673 | |
5674 | rax = &vcpu->vc_gueststate.vg_rax; |
5675 | rcx = &vcpu->vc_gueststate.vg_rcx; |
5676 | rdx = &vcpu->vc_gueststate.vg_rdx; |
5677 | |
5678 | switch (*rcx) { |
5679 | case MSR_BIOS_SIGN0x08b: |
5680 | case MSR_PLATFORM_ID0x017: |
5681 | /* Ignored */ |
5682 | *rax = 0; |
5683 | *rdx = 0; |
5684 | break; |
5685 | case MSR_CR_PAT0x277: |
5686 | *rax = (vcpu->vc_shadow_pat & 0xFFFFFFFFULL); |
5687 | *rdx = (vcpu->vc_shadow_pat >> 32); |
5688 | break; |
5689 | default: |
5690 | /* An unsupported MSR causes a #GP exception; don't advance %rip */ |
5691 | DPRINTF("%s: unsupported rdmsr (msr=0x%llx), injecting #GP\n", |
5692 | __func__, *rcx); |
5693 | ret = vmm_inject_gp(vcpu); |
5694 | return (ret); |
5695 | } |
5696 | |
5697 | vcpu->vc_gueststate.vg_rip += insn_length; |
5698 | |
5699 | return (0); |
5700 | } |
5701 | |
5702 | /* |
5703 | * vmx_handle_xsetbv |
5704 | * |
5705 | * VMX-specific part of the xsetbv instruction exit handler |
5706 | * |
5707 | * Parameters: |
5708 | * vcpu: vcpu structure containing instruction info causing the exit |
5709 | * |
5710 | * Return value: |
5711 | * 0: The operation was successful |
5712 | * EINVAL: An error occurred |
5713 | */ |
5714 | int |
5715 | vmx_handle_xsetbv(struct vcpu *vcpu) |
5716 | { |
5717 | uint64_t insn_length, *rax; |
5718 | int ret; |
5719 | |
5720 | if (vmread(VMCS_INSTRUCTION_LENGTH0x440C, &insn_length)) { |
5721 | printf("%s: can't obtain instruction length\n", __func__); |
5722 | return (EINVAL22); |
5723 | } |
5724 | |
5725 | /* All XSETBV instructions are 3 bytes */ |
5726 | if (insn_length != 3) { |
5727 | DPRINTF("%s: XSETBV with instruction length %lld not " |
5728 | "supported\n", __func__, insn_length); |
5729 | return (EINVAL22); |
5730 | } |
5731 | |
5732 | rax = &vcpu->vc_gueststate.vg_rax; |
5733 | |
5734 | ret = vmm_handle_xsetbv(vcpu, rax); |
5735 | |
5736 | vcpu->vc_gueststate.vg_rip += insn_length; |
5737 | |
5738 | return ret; |
5739 | } |
5740 | |
5741 | /* |
5742 | * svm_handle_xsetbv |
5743 | * |
5744 | * SVM-specific part of the xsetbv instruction exit handler |
5745 | * |
5746 | * Parameters: |
5747 | * vcpu: vcpu structure containing instruction info causing the exit |
5748 | * |
5749 | * Return value: |
5750 | * 0: The operation was successful |
5751 | * EINVAL: An error occurred |
5752 | */ |
5753 | int |
5754 | svm_handle_xsetbv(struct vcpu *vcpu) |
5755 | { |
5756 | uint64_t insn_length, *rax; |
5757 | int ret; |
5758 | struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va; |
5759 | |
5760 | /* All XSETBV instructions are 3 bytes */ |
5761 | insn_length = 3; |
5762 | |
5763 | rax = &vmcb->v_rax; |
5764 | |
5765 | ret = vmm_handle_xsetbv(vcpu, rax); |
5766 | |
5767 | vcpu->vc_gueststate.vg_rip += insn_length; |
5768 | |
5769 | return ret; |
5770 | } |
5771 | |
5772 | /* |
5773 | * vmm_handle_xsetbv |
5774 | * |
5775 | * Handler for xsetbv instructions. We allow the guest VM to set xcr0 values |
5776 | * limited to the xsave_mask in use in the host. |
5777 | * |
5778 | * Parameters: |
5779 | * vcpu: vcpu structure containing instruction info causing the exit |
5780 | * rax: pointer to guest %rax |
5781 | * |
5782 | * Return value: |
5783 | * 0: The operation was successful |
5784 | * EINVAL: An error occurred |
5785 | */ |
5786 | int |
5787 | vmm_handle_xsetbv(struct vcpu *vcpu, uint64_t *rax) |
5788 | { |
5789 | uint64_t *rdx, *rcx, val; |
5790 | |
5791 | rcx = &vcpu->vc_gueststate.vg_rcx; |
5792 | rdx = &vcpu->vc_gueststate.vg_rdx; |
5793 | |
5794 | if (vmm_get_guest_cpu_cpl(vcpu) != 0) { |
5795 | DPRINTF("%s: guest cpl not zero\n", __func__); |
5796 | return (vmm_inject_gp(vcpu)); |
5797 | } |
5798 | |
5799 | if (*rcx != 0) { |
5800 | DPRINTF("%s: guest specified invalid xcr register number " |
5801 | "%lld\n", __func__, *rcx); |
5802 | return (vmm_inject_gp(vcpu)); |
5803 | } |
5804 | |
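     | /* xsetbv supplies the new XCR value in %edx:%eax; reassemble the 64-bit |
     | * value before checking it against the host's xsave_mask. |
     | */ |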
5805 | val = *rax + (*rdx << 32); |
5806 | if (val & ~xsave_mask) { |
5807 | DPRINTF("%s: guest specified xcr0 outside xsave_mask %lld\n", |
5808 | __func__, val); |
5809 | return (vmm_inject_gp(vcpu)); |
5810 | } |
5811 | |
5812 | vcpu->vc_gueststate.vg_xcr0 = val; |
5813 | |
5814 | return (0); |
5815 | } |
5816 | |
5817 | /* |
5818 | * vmx_handle_misc_enable_msr |
5819 | * |
5820 | * Handler for writes to the MSR_MISC_ENABLE (0x1a0) MSR on Intel CPUs. We |
5821 | * limit what the guest can write to this MSR (certain hardware-related |
5822 | * settings like speedstep, etc). |
5823 | * |
5824 | * Parameters: |
5825 | * vcpu: vcpu structure containing information about the wrmsr causing this |
5826 | * exit |
5827 | */ |
5828 | void |
5829 | vmx_handle_misc_enable_msr(struct vcpu *vcpu) |
5830 | { |
5831 | uint64_t *rax, *rdx; |
5832 | struct vmx_msr_store *msr_store; |
5833 | |
5834 | rax = &vcpu->vc_gueststate.vg_rax; |
5835 | rdx = &vcpu->vc_gueststate.vg_rdx; |
5836 | msr_store = (struct vmx_msr_store *)vcpu->vc_vmx_msr_exit_save_va; |
5837 | |
5838 | /* Filter out guest writes to TCC, EIST, and xTPR */ |
5839 | *rax &= ~(MISC_ENABLE_TCC(1 << 3) | MISC_ENABLE_EIST_ENABLED(1 << 16) | |
5840 | MISC_ENABLE_xTPR_MESSAGE_DISABLE(1 << 23)); |
5841 | |
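     | /* Record the filtered value in the guest's saved MSR state. */ |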
5842 | msr_store[VCPU_REGS_MISC_ENABLE6].vms_data = *rax | (*rdx << 32); |
5843 | } |
5844 | |
5845 | /* |
5846 | * vmx_handle_wrmsr |
5847 | * |
5848 | * Handler for wrmsr instructions. This handler logs the access (when VMM_DEBUG |
5849 | * is enabled) and discards the written data. Any valid wrmsr will not end |
5850 | * up here (it will be whitelisted in the MSR bitmap). |
5851 | * |
5852 | * Parameters: |
5853 | * vcpu: vcpu structure containing instruction info causing the exit |
5854 | * |
5855 | * Return value: |
5856 | * 0: The operation was successful |
5857 | * EINVAL: An error occurred |
5858 | */ |
5859 | int |
5860 | vmx_handle_wrmsr(struct vcpu *vcpu) |
5861 | { |
5862 | uint64_t insn_length, val; |
5863 | uint64_t *rax, *rdx, *rcx; |
5864 | int ret; |
5865 | |
5866 | if (vmread(VMCS_INSTRUCTION_LENGTH0x440C, &insn_length)) { |
5867 | printf("%s: can't obtain instruction length\n", __func__); |
5868 | return (EINVAL22); |
5869 | } |
5870 | |
5871 | if (insn_length != 2) { |
5872 | DPRINTF("%s: WRMSR with instruction length %lld not " |
5873 | "supported\n", __func__, insn_length); |
5874 | return (EINVAL22); |
5875 | } |
5876 | |
5877 | rax = &vcpu->vc_gueststate.vg_rax; |
5878 | rcx = &vcpu->vc_gueststate.vg_rcx; |
5879 | rdx = &vcpu->vc_gueststate.vg_rdx; |
5880 | val = (*rdx << 32) | (*rax & 0xFFFFFFFFULL); |
5881 | |
5882 | switch (*rcx) { |
5883 | case MSR_CR_PAT0x277: |
5884 | if (!vmm_pat_is_valid(val)) { |
5885 | ret = vmm_inject_gp(vcpu); |
5886 | return (ret); |
5887 | } |
5888 | vcpu->vc_shadow_pat = val; |
5889 | break; |
5890 | case MSR_MISC_ENABLE0x1a0: |
5891 | vmx_handle_misc_enable_msr(vcpu); |
5892 | break; |
5893 | case MSR_SMM_MONITOR_CTL0x09b: |
5894 | /* |
5895 | * 34.15.5 - Enabling dual monitor treatment |
5896 | * |
5897 | * Unsupported, so inject #GP and return without |
5898 | * advancing %rip. |
5899 | */ |
5900 | ret = vmm_inject_gp(vcpu); |
5901 | return (ret); |
5902 | case KVM_MSR_SYSTEM_TIME0x4b564d01: |
5903 | vmm_init_pvclock(vcpu, |
5904 | (*rax & 0xFFFFFFFFULL) | (*rdx << 32)); |
5905 | break; |
5906 | #ifdef VMM_DEBUG |
5907 | default: |
5908 | /* |
5909 | * Log the access, to be able to identify unknown MSRs |
5910 | */ |
5911 | DPRINTF("%s: wrmsr exit, msr=0x%llx, discarding data " |
5912 | "written from guest=0x%llx:0x%llx\n", __func__, |
5913 | *rcx, *rdx, *rax); |
5914 | #endif /* VMM_DEBUG */ |
5915 | } |
5916 | |
5917 | vcpu->vc_gueststate.vg_rip += insn_length; |
5918 | |
5919 | return (0); |
5920 | } |
5921 | |
5922 | /* |
5923 | * svm_handle_msr |
5924 | * |
5925 | * Handler for MSR instructions. |
5926 | * |
5927 | * Parameters: |
5928 | * vcpu: vcpu structure containing instruction info causing the exit |
5929 | * |
5930 | * Return value: |
5931 | * Always 0 (successful) |
5932 | */ |
5933 | int |
5934 | svm_handle_msr(struct vcpu *vcpu) |
5935 | { |
5936 | uint64_t insn_length, val; |
5937 | uint64_t *rax, *rcx, *rdx; |
5938 | struct vmcb *vmcb = (struct vmcb *)vcpu->vc_control_va; |
5939 | int ret; |
5940 | |
5941 | /* XXX: Validate RDMSR / WRMSR insn_length */ |
5942 | insn_length = 2; |
5943 | |
5944 | rax = &vmcb->v_rax; |
5945 | rcx = &vcpu->vc_gueststate.vg_rcx; |
5946 | rdx = &vcpu->vc_gueststate.vg_rdx; |
5947 | |
5948 | if (vmcb->v_exitinfo1 == 1) { |
5949 | /* WRMSR */ |
5950 | val = (*rdx << 32) | (*rax & 0xFFFFFFFFULL); |
5951 | |
5952 | switch (*rcx) { |
5953 | case MSR_CR_PAT0x277: |
5954 | if (!vmm_pat_is_valid(val)) { |
5955 | ret = vmm_inject_gp(vcpu); |
5956 | return (ret); |
5957 | } |
5958 | vcpu->vc_shadow_pat = val; |
5959 | break; |
5960 | case MSR_EFER0xc0000080: |
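     | /* EFER.SVME must remain set while the guest runs under SVM, so it is |
     | * forced on regardless of what the guest wrote. |
     | */ |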
5961 | vmcb->v_efer = *rax | EFER_SVME0x00001000; |
5962 | break; |
5963 | case KVM_MSR_SYSTEM_TIME0x4b564d01: |
5964 | vmm_init_pvclock(vcpu, |
5965 | (*rax & 0xFFFFFFFFULL) | (*rdx << 32)); |
5966 | break; |
5967 | default: |
5968 | /* Log the access, to be able to identify unknown MSRs */ |
5969 | DPRINTF("%s: wrmsr exit, msr=0x%llx, discarding data " |
5970 | "written from guest=0x%llx:0x%llx\n", __func__, |
5971 | *rcx, *rdx, *rax); |
5972 | } |
5973 | } else { |
5974 | /* RDMSR */ |
5975 | switch (*rcx) { |
5976 | case MSR_BIOS_SIGN0x08b: |
5977 | case MSR_INT_PEN_MSG0xc0010055: |
5978 | case MSR_PLATFORM_ID0x017: |
5979 | /* Ignored */ |
5980 | *rax = 0; |
5981 | *rdx = 0; |
5982 | break; |
5983 | case MSR_CR_PAT0x277: |
5984 | *rax = (vcpu->vc_shadow_pat & 0xFFFFFFFFULL); |
5985 | *rdx = (vcpu->vc_shadow_pat >> 32); |
5986 | break; |
5987 | case MSR_DE_CFG0xc0011029: |
5988 | /* LFENCE serializing bit is set by host */ |
5989 | *rax = DE_CFG_SERIALIZE_LFENCE(1 << 1); |
5990 | *rdx = 0; |
5991 | break; |
5992 | default: |
5993 | /* |
5994 | * An unsupported MSR causes a #GP exception; don't |
5995 | * advance %rip |
5996 | */ |
5997 | DPRINTF("%s: unsupported rdmsr (msr=0x%llx), " |
5998 | "injecting #GP\n", __func__, *rcx); |
5999 | ret = vmm_inject_gp(vcpu); |
6000 | return (ret); |
6001 | } |
6002 | } |
6003 | |
6004 | vcpu->vc_gueststate.vg_rip += insn_length; |
6005 | |
6006 | return (0); |
6007 | } |
6008 | |
6009 | /* |
6010 | * vmm_handle_cpuid |
6011 | * |
6012 | * Exit handler for CPUID instruction |
6013 | * |
6014 | * Parameters: |
6015 | * vcpu: vcpu causing the CPUID exit |
6016 | * |
6017 | * Return value: |
6018 | * 0: the exit was processed successfully |
6019 | * EINVAL: error occurred validating the CPUID instruction arguments |
6020 | */ |
6021 | int |
6022 | vmm_handle_cpuid(struct vcpu *vcpu) |
6023 | { |
6024 | uint64_t insn_length, cr4; |
6025 | uint64_t *rax, *rbx, *rcx, *rdx; |
6026 | struct vmcb *vmcb; |
6027 | uint32_t leaf, subleaf, eax, ebx, ecx, edx; |
6028 | struct vmx_msr_store *msr_store; |
6029 | int vmm_cpuid_level; |
6030 | |
6031 | /* what's the cpuid level we support/advertise? */ |
6032 | vmm_cpuid_level = cpuid_level; |
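     | /* cpuid leaf 0x15 carries TSC/core crystal clock information; when the |
     | * host's TSC is invariant, advertise at least that leaf so guests can |
     | * still derive the TSC frequency. |
     | */ |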
6033 | if (vmm_cpuid_level < 0x15 && tsc_is_invariant) |
6034 | vmm_cpuid_level = 0x15; |
6035 | |
6036 | if (vmm_softc->mode == VMM_MODE_EPT) { |
6037 | if (vmread(VMCS_INSTRUCTION_LENGTH0x440C, &insn_length)) { |
6038 | DPRINTF("%s: can't obtain instruction length\n", |
6039 | __func__); |
6040 | return (EINVAL22); |
6041 | } |
6042 | |
6043 | if (vmread(VMCS_GUEST_IA32_CR40x6804, &cr4)) { |
6044 | DPRINTF("%s: can't obtain cr4\n", __func__); |
6045 | return (EINVAL22); |
6046 | } |
6047 | |
6048 | rax = &vcpu->vc_gueststate.vg_rax; |
6049 | |
6050 | /* |
6051 | * "CPUID leaves above 02H and below 80000000H are only |
6052 | * visible when IA32_MISC_ENABLE MSR has bit 22 set to its |
6053 | * default value 0" |
6054 | */ |