With cross-vendor support gone, the separate 64-bit software shadow of the SEP MSRs is no longer needed. AMD CPUs ignore the top 32 bits of the SYSENTER/SYSEXIT MSRs, which is not how this emulation worked due to the need for cross-vendor support. Any AMD VMs storing state in the top 32 bits of the SEP MSRs will lose that state.
It's very unlikely to affect any production VM because having 64bit width just isn't how real AMD CPUs behave. Signed-off-by: Alejandro Vallejo <[email protected]> Reviewed-by: Teddy Astie <[email protected]> Acked-by: Jan Beulich <[email protected]> --- v5: * New title --- xen/arch/x86/hvm/svm/svm.c | 42 +++++++++++------------- xen/arch/x86/hvm/svm/vmcb.c | 3 ++ xen/arch/x86/include/asm/hvm/svm-types.h | 10 ------ 3 files changed, 22 insertions(+), 33 deletions(-) diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 20591c4a44f..076d57e4847 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -401,10 +401,6 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c) { struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; - c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs; - c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp; - c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip; - if ( vmcb->event_inj.v && hvm_event_needs_reinjection(vmcb->event_inj.type, vmcb->event_inj.vector) ) @@ -468,11 +464,6 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c) svm_update_guest_cr(v, 0, 0); svm_update_guest_cr(v, 4, 0); - /* Load sysenter MSRs into both VMCB save area and VCPU fields. 
*/ - vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = c->sysenter_cs; - vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = c->sysenter_esp; - vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = c->sysenter_eip; - if ( paging_mode_hap(v->domain) ) { vmcb_set_np(vmcb, true); @@ -501,6 +492,9 @@ static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data) { struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; + data->sysenter_cs = vmcb->sysenter_cs; + data->sysenter_esp = vmcb->sysenter_esp; + data->sysenter_eip = vmcb->sysenter_eip; data->shadow_gs = vmcb->kerngsbase; data->msr_lstar = vmcb->lstar; data->msr_star = vmcb->star; @@ -512,11 +506,14 @@ static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data) { struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb; - vmcb->kerngsbase = data->shadow_gs; - vmcb->lstar = data->msr_lstar; - vmcb->star = data->msr_star; - vmcb->cstar = data->msr_cstar; - vmcb->sfmask = data->msr_syscall_mask; + vmcb->lstar = data->msr_lstar; + vmcb->star = data->msr_star; + vmcb->cstar = data->msr_cstar; + vmcb->sfmask = data->msr_syscall_mask; + vmcb->kerngsbase = data->shadow_gs; + vmcb->sysenter_cs = data->sysenter_cs; + vmcb->sysenter_esp = data->sysenter_esp; + vmcb->sysenter_eip = data->sysenter_eip; v->arch.hvm.guest_efer = data->msr_efer; svm_update_guest_efer(v); } @@ -1734,12 +1731,9 @@ static int cf_check svm_msr_read_intercept( switch ( msr ) { - /* - * Sync not needed while the cross-vendor logic is in unilateral effect. 
case MSR_IA32_SYSENTER_CS: case MSR_IA32_SYSENTER_ESP: case MSR_IA32_SYSENTER_EIP: - */ case MSR_STAR: case MSR_LSTAR: case MSR_CSTAR: @@ -1754,13 +1748,15 @@ static int cf_check svm_msr_read_intercept( switch ( msr ) { case MSR_IA32_SYSENTER_CS: - *msr_content = v->arch.hvm.svm.guest_sysenter_cs; + *msr_content = vmcb->sysenter_cs; break; + case MSR_IA32_SYSENTER_ESP: - *msr_content = v->arch.hvm.svm.guest_sysenter_esp; + *msr_content = vmcb->sysenter_esp; break; + case MSR_IA32_SYSENTER_EIP: - *msr_content = v->arch.hvm.svm.guest_sysenter_eip; + *msr_content = vmcb->sysenter_eip; break; case MSR_STAR: @@ -1954,11 +1950,11 @@ static int cf_check svm_msr_write_intercept( switch ( msr ) { case MSR_IA32_SYSENTER_ESP: - vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = msr_content; + vmcb->sysenter_esp = msr_content; break; case MSR_IA32_SYSENTER_EIP: - vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = msr_content; + vmcb->sysenter_eip = msr_content; break; case MSR_LSTAR: @@ -1984,7 +1980,7 @@ static int cf_check svm_msr_write_intercept( break; case MSR_IA32_SYSENTER_CS: - vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = msr_content; + vmcb->sysenter_cs = msr_content; break; case MSR_STAR: diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c index e583ef8548c..76fcaf15c2b 100644 --- a/xen/arch/x86/hvm/svm/vmcb.c +++ b/xen/arch/x86/hvm/svm/vmcb.c @@ -97,6 +97,9 @@ static int construct_vmcb(struct vcpu *v) svm_disable_intercept_for_msr(v, MSR_LSTAR); svm_disable_intercept_for_msr(v, MSR_STAR); svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK); + svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS); + svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP); + svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP); vmcb->_msrpm_base_pa = virt_to_maddr(svm->msrpm); vmcb->_iopm_base_pa = __pa(v->domain->arch.hvm.io_bitmap); diff --git a/xen/arch/x86/include/asm/hvm/svm-types.h b/xen/arch/x86/include/asm/hvm/svm-types.h index 
051b235d8f6..aaee91b4b61 100644 --- a/xen/arch/x86/include/asm/hvm/svm-types.h +++ b/xen/arch/x86/include/asm/hvm/svm-types.h @@ -27,16 +27,6 @@ struct svm_vcpu { /* VMCB has a cached instruction from #PF/#NPF Decode Assist? */ uint8_t cached_insn_len; /* Zero if no cached instruction. */ - - /* - * Upper four bytes are undefined in the VMCB, therefore we can't use the - * fields in the VMCB. Write a 64bit value and then read a 64bit value is - * fine unless there's a VMRUN/VMEXIT in between which clears the upper - * four bytes. - */ - uint64_t guest_sysenter_cs; - uint64_t guest_sysenter_esp; - uint64_t guest_sysenter_eip; }; struct nestedsvm { -- 2.43.0
