With the Virtual NMI (vNMI) feature, the pending NMI is simply stuffed into the VMCB and handed off to the hardware. There is no need to artificially track NMI handling completion by intercepting the IRET instruction.
Adjust svm_inject_nmi() to inject NMIs using the hardware-accelerated vNMI feature when the AMD platform supports vNMI. Signed-off-by: Abdelkareem Abdelsaamad <[email protected]> --- xen/arch/x86/hvm/svm/intr.c | 8 ++++++++ xen/arch/x86/hvm/svm/svm.c | 1 + xen/arch/x86/hvm/svm/vmcb.c | 2 ++ 3 files changed, 11 insertions(+) diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c index 6453a46b85..996362f8a6 100644 --- a/xen/arch/x86/hvm/svm/intr.c +++ b/xen/arch/x86/hvm/svm/intr.c @@ -33,6 +33,14 @@ static void svm_inject_nmi(struct vcpu *v) u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); intinfo_t event; + if ( vmcb->_vintr.fields.vnmi_enable ) + { + if ( !vmcb->_vintr.fields.vnmi_pending ) + vmcb->_vintr.fields.vnmi_pending = 1; + + return; + } + event.raw = 0; event.v = true; event.type = X86_ET_NMI; diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 18ba837738..815565c33f 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -2524,6 +2524,7 @@ const struct hvm_function_table * __init start_svm(void) P(cpu_has_tsc_ratio, "TSC Rate MSR"); P(cpu_has_svm_sss, "NPT Supervisor Shadow Stack"); P(cpu_has_svm_spec_ctrl, "MSR_SPEC_CTRL virtualisation"); + P(cpu_has_svm_vnmi, "Virtual NMI"); P(cpu_has_svm_bus_lock, "Bus Lock Filter"); #undef P diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c index e583ef8548..e90bbac332 100644 --- a/xen/arch/x86/hvm/svm/vmcb.c +++ b/xen/arch/x86/hvm/svm/vmcb.c @@ -184,6 +184,8 @@ static int construct_vmcb(struct vcpu *v) if ( default_xen_spec_ctrl == SPEC_CTRL_STIBP ) v->arch.msrs->spec_ctrl.raw = SPEC_CTRL_STIBP; + vmcb->_vintr.fields.vnmi_enable = cpu_has_svm_vnmi; + return 0; } -- 2.52.0
