On 13/02/2026 10:44 pm, Abdelkareem Abdelsaamad wrote:
> With vNMI, the pending NMI is simply stuffed into the VMCB and handed off
> to the hardware. This means that Xen needs to be able to set a vNMI pending
> on-demand, and also query if a vNMI is pending, e.g. to honor the "at most
> one NMI pending" rule and to preserve all NMIs across save and restore.
>
> Introduce two new hvm_function_table callbacks in support of SVM's vNMI,
> allowing the Xen hypervisor to query whether a vNMI is pending and to set
> the pending flag in the VMCB's _vintr field, so that NMIs are serviced by
> hardware if/when virtual NMIs become unblocked.
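
For context, on SVM both of the operations described above ultimately reduce
to the vNMI bits in the VMCB's _vintr control word.  A minimal sketch of what
the query and set sides presumably look like (the helper names here are mine,
purely illustrative, and not necessarily what the patch itself uses):

    /* Query: is a virtual NMI already pending for this vCPU? */
    static bool svm_vnmi_pending(const struct vcpu *v)
    {
        const struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;

        return vmcb->_vintr.fields.vnmi_enable &&
               vmcb->_vintr.fields.vnmi_pending;
    }

    /* Set: flag the NMI as pending and let hardware deliver it. */
    static void svm_set_vnmi_pending(struct vcpu *v)
    {
        v->arch.hvm.svm.vmcb->_vintr.fields.vnmi_pending = true;
    }
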
>
> Signed-off-by: Abdelkareem Abdelsaamad <[email protected]>
> ---
>  xen/arch/x86/hvm/svm/intr.c        | 13 ++++++++++--
>  xen/arch/x86/hvm/svm/svm.c         | 33 ++++++++++++++++++++++++++++--
>  xen/arch/x86/hvm/svm/vmcb.c        |  2 ++
>  xen/arch/x86/include/asm/hvm/hvm.h |  9 ++++++++
>  4 files changed, 53 insertions(+), 4 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
> index 6453a46b85..bc52f8e189 100644
> --- a/xen/arch/x86/hvm/svm/intr.c
> +++ b/xen/arch/x86/hvm/svm/intr.c
> @@ -29,10 +29,19 @@
>  
>  static void svm_inject_nmi(struct vcpu *v)
>  {
> -    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
> -    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
> +    struct vmcb_struct *vmcb;
> +    u32 general1_intercepts;
>      intinfo_t event;
>  
> +    if ( hvm_funcs.is_vnmi_enabled(v) )
> +    {
> +        hvm_funcs.set_vnmi_pending(v);
> +        return;
> +    }
> +
> +    vmcb = v->arch.hvm.svm.vmcb;
> +    general1_intercepts = vmcb_get_general1_intercepts(vmcb);
> +

There's no need to defer these assignments.

When the HVM hooks are deleted, the correct logic here is:

    if ( vmcb->_vintr.fields.vnmi_enable )
    {
        vmcb->_vintr.fields.vnmi_pending = true;
        return;
    }
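
i.e. the top of svm_inject_nmi() would end up looking something like this
(just a sketch, splicing the suggestion above into the quoted context; the
remainder of the existing event injection path follows unchanged):

    static void svm_inject_nmi(struct vcpu *v)
    {
        struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
        u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
        intinfo_t event;

        /*
         * With vNMI, hardware tracks and delivers the pending NMI itself,
         * so flag it in the VMCB rather than using manual event injection.
         */
        if ( vmcb->_vintr.fields.vnmi_enable )
        {
            vmcb->_vintr.fields.vnmi_pending = true;
            return;
        }
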

> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 6e380890bd..00e5630025 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -545,7 +571,7 @@ static unsigned int cf_check svm_get_interrupt_shadow(struct vcpu *v)
>      struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
>      unsigned int intr_shadow = 0;
>  
> -    if ( vmcb->int_stat.intr_shadow )
> +    if ( vmcb->int_stat.intr_shadow || svm_is_vnmi_masked(v) )
>          intr_shadow |= HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI;
>  
>      if ( vmcb_get_general1_intercepts(vmcb) & GENERAL1_INTERCEPT_IRET )

It's only HVM_INTR_SHADOW_NMI that vNMI applies to, i.e.:

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 18ba837738c6..f5c7ea0b0dbe 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -548,7 +548,9 @@ static unsigned int cf_check svm_get_interrupt_shadow(struct vcpu *v)
     if ( vmcb->int_stat.intr_shadow )
         intr_shadow |= HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI;
 
-    if ( vmcb_get_general1_intercepts(vmcb) & GENERAL1_INTERCEPT_IRET )
+    if ( vmcb->_vintr.fields.vnmi_enable
+         ? vmcb->_vintr.fields.vnmi_blocked
+         : (vmcb_get_general1_intercepts(vmcb) & GENERAL1_INTERCEPT_IRET) )
         intr_shadow |= HVM_INTR_SHADOW_NMI;
 
     return intr_shadow;
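
Presumably the write side wants the mirror image of this too, i.e. in
svm_set_interrupt_shadow(), something along these lines (a sketch only, not
part of the quoted patch; the local variable names are assumed from that
function's existing context, and the exact placement is a guess):

    if ( vmcb->_vintr.fields.vnmi_enable )
        /* Hardware tracks NMI blocking via the VMCB when vNMI is active. */
        vmcb->_vintr.fields.vnmi_blocked =
            !!(intr_shadow & HVM_INTR_SHADOW_NMI);
    else
    {
        /* Otherwise, keep tracking NMI blocking via the IRET intercept. */
        general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
        if ( intr_shadow & HVM_INTR_SHADOW_NMI )
            general1_intercepts |= GENERAL1_INTERCEPT_IRET;
        vmcb_set_general1_intercepts(vmcb, general1_intercepts);
    }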


~Andrew
