With vNMI, a pending NMI is simply stuffed into the VMCB and handed off
to the hardware. This means that Xen needs to be able to mark a vNMI as
pending on demand, and also to query whether one is already pending, e.g.
to honor the "at most one NMI pending" rule and to preserve all NMIs
across save and restore.

Introduce three new hvm_function_table callbacks to support SVM's vNMI:
one to query whether vNMI is enabled for a vCPU, one to query whether
virtual NMIs are currently masked by hardware, and one to set the VMCB's
_vintr pending flag so that NMIs are serviced by hardware if/when virtual
NMIs become unblocked.

Signed-off-by: Abdelkareem Abdelsaamad <[email protected]>
---
 xen/arch/x86/hvm/svm/intr.c        | 13 ++++++++++--
 xen/arch/x86/hvm/svm/svm.c         | 33 ++++++++++++++++++++++++++++--
 xen/arch/x86/hvm/svm/vmcb.c        |  2 ++
 xen/arch/x86/include/asm/hvm/hvm.h |  9 ++++++++
 4 files changed, 53 insertions(+), 4 deletions(-)
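
As an aside (not part of the patch itself): a minimal sketch of how the
three new hooks are meant to fit together. The helper below is
hypothetical and purely illustrative, assuming it lives in a file that
already includes <asm/hvm/hvm.h>; the real consumers are svm_inject_nmi()
and svm_get_interrupt_shadow() in the hunks that follow.

    /* Hypothetical helper, for illustration only -- not in this patch. */
    static void sketch_pend_nmi(struct vcpu *v)
    {
        if ( !hvm_funcs.is_vnmi_enabled(v) )
        {
            /*
             * No vNMI: fall back to software event injection, with an
             * IRET intercept to notice when NMI blocking ends.
             */
            return;
        }

        /*
         * With vNMI, merely mark the NMI pending in the VMCB; hardware
         * delivers it once virtual NMIs become unblocked.  A false return
         * means an NMI is already pending, which is how the "at most one
         * NMI pending" rule is honored.
         */
        if ( !hvm_funcs.set_vnmi_pending(v) )
            return;

        /*
         * Separately, is_vnmi_masked() reports whether hardware is
         * currently blocking virtual NMIs; svm_get_interrupt_shadow()
         * folds that into the interrupt shadow so the blocked state is
         * preserved across save/restore.
         */
    }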

diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 6453a46b85..bc52f8e189 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -29,10 +29,19 @@
 
 static void svm_inject_nmi(struct vcpu *v)
 {
-    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
-    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+    struct vmcb_struct *vmcb;
+    u32 general1_intercepts;
     intinfo_t event;
 
+    if ( hvm_funcs.is_vnmi_enabled(v) )
+    {
+        hvm_funcs.set_vnmi_pending(v);
+        return;
+    }
+
+    vmcb = v->arch.hvm.svm.vmcb;
+    general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+
     event.raw = 0;
     event.v = true;
     event.type = X86_ET_NMI;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 6e380890bd..00e5630025 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -297,6 +297,32 @@ void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
         __clear_bit(msr * 2 + 1, msr_bit);
 }
 
+static bool cf_check svm_is_vnmi_enabled(struct vcpu *vcpu)
+{
+    return hvm_funcs.caps.vNMI &&
+           vcpu->arch.hvm.svm.vmcb->_vintr.fields.vnmi_enable;
+}
+
+static bool cf_check svm_is_vnmi_masked(struct vcpu *vcpu)
+{
+    if ( !svm_is_vnmi_enabled(vcpu) )
+        return false;
+
+    return vcpu->arch.hvm.svm.vmcb->_vintr.fields.vnmi_blocking;
+}
+
+static bool cf_check svm_set_vnmi_pending(struct vcpu *vcpu)
+{
+    if ( !svm_is_vnmi_enabled(vcpu) )
+        return false;
+
+    if ( vcpu->arch.hvm.svm.vmcb->_vintr.fields.vnmi_pending )
+        return false;
+
+    vcpu->arch.hvm.svm.vmcb->_vintr.fields.vnmi_pending = 1;
+    return true;
+}
+
 #ifdef CONFIG_VM_EVENT
 static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
 {
@@ -545,7 +571,7 @@ static unsigned int cf_check svm_get_interrupt_shadow(struct vcpu *v)
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     unsigned int intr_shadow = 0;
 
-    if ( vmcb->int_stat.intr_shadow )
+    if ( vmcb->int_stat.intr_shadow || svm_is_vnmi_masked(v) )
         intr_shadow |= HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI;
 
     if ( vmcb_get_general1_intercepts(vmcb) & GENERAL1_INTERCEPT_IRET )
@@ -2464,7 +2490,9 @@ static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
 #endif
     .set_rdtsc_exiting    = svm_set_rdtsc_exiting,
     .get_insn_bytes       = svm_get_insn_bytes,
-
+    .is_vnmi_enabled      = svm_is_vnmi_enabled,
+    .is_vnmi_masked       = svm_is_vnmi_masked,
+    .set_vnmi_pending     = svm_set_vnmi_pending,
     .nhvm_vcpu_initialise = nsvm_vcpu_initialise,
     .nhvm_vcpu_destroy = nsvm_vcpu_destroy,
     .nhvm_vcpu_reset = nsvm_vcpu_reset,
@@ -2524,6 +2552,7 @@ const struct hvm_function_table * __init start_svm(void)
     P(cpu_has_tsc_ratio, "TSC Rate MSR");
     P(cpu_has_svm_sss, "NPT Supervisor Shadow Stack");
     P(cpu_has_svm_spec_ctrl, "MSR_SPEC_CTRL virtualisation");
+    P(cpu_has_svm_vnmi, "Virtual NMI");
     P(cpu_has_svm_bus_lock, "Bus Lock Filter");
 #undef P
 
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index e583ef8548..e90bbac332 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -184,6 +184,8 @@ static int construct_vmcb(struct vcpu *v)
     if ( default_xen_spec_ctrl == SPEC_CTRL_STIBP )
         v->arch.msrs->spec_ctrl.raw = SPEC_CTRL_STIBP;
 
+    vmcb->_vintr.fields.vnmi_enable = cpu_has_svm_vnmi;
+
     return 0;
 }
 
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index ad17ea73e9..9dc8b35f91 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -224,6 +224,15 @@ struct hvm_function_table {
     int (*pi_update_irte)(const struct vcpu *v, const struct pirq *pirq,
                           uint8_t gvec);
     void (*update_vlapic_mode)(struct vcpu *v);
+    /* Whether or not virtual NMI is enabled. */
+    bool (*is_vnmi_enabled)(struct vcpu *vcpu);
+    /* Whether or not virtual NMIs are currently masked in hardware. */
+    bool (*is_vnmi_masked)(struct vcpu *vcpu);
+    /*
+     * Attempt to pend a virtual NMI in hardware.
+     * Returns true on success.
+     */
+    bool (*set_vnmi_pending)(struct vcpu *vcpu);
 
     /*Walk nested p2m  */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
-- 
2.52.0

