On 11.03.2026 15:27, Alejandro Vallejo wrote:
> Remove cross-vendor support now that VMs can no longer have a different
> vendor than the host.
> 
> While at it, refactor the function to exit early and skip initialising
> the emulation context when FEP is not enabled.
> 
> No functional change intended.
> 
> Signed-off-by: Alejandro Vallejo <[email protected]>
> ---
> v4:
>   * Reverted refactor of the `walk` variable assignment

"Revert" as in "move it even farther away from the original". As said, you
want re-indentation, so please do just that, nothing else that isn't
explicitly justified (like the moving of hvm_emulate_init_once() is). With
this put back in its original shape (can do while committing, I suppose):
Reviewed-by: Jan Beulich <[email protected]>

> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3832,67 +3832,50 @@ int hvm_descriptor_access_intercept(uint64_t exit_info,
>      return X86EMUL_OKAY;
>  }
>  
> -static bool cf_check is_cross_vendor(
> -    const struct x86_emulate_state *state, const struct x86_emulate_ctxt 
> *ctxt)
> -{
> -    switch ( ctxt->opcode )
> -    {
> -    case X86EMUL_OPC(0x0f, 0x05): /* syscall */
> -    case X86EMUL_OPC(0x0f, 0x34): /* sysenter */
> -    case X86EMUL_OPC(0x0f, 0x35): /* sysexit */
> -        return true;
> -    }
> -
> -    return false;
> -}
> -
>  void hvm_ud_intercept(struct cpu_user_regs *regs)
>  {
>      struct vcpu *cur = current;
> -    bool should_emulate =
> -        cur->domain->arch.cpuid->x86_vendor != boot_cpu_data.x86_vendor;
>      struct hvm_emulate_ctxt ctxt;
> +    const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
> +    uint32_t walk;
> +    unsigned long addr;
> +    char sig[5]; /* ud2; .ascii "xen" */
>  
> -    hvm_emulate_init_once(&ctxt, opt_hvm_fep ? NULL : is_cross_vendor, regs);
> -
> -    if ( opt_hvm_fep )
> +    if ( !opt_hvm_fep )
>      {
> -        const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
> -        uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
> -                         ? PFEC_user_mode : 0) | PFEC_insn_fetch;
> -        unsigned long addr;
> -        char sig[5]; /* ud2; .ascii "xen" */
> -
> -        if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
> -                                        sizeof(sig), hvm_access_insn_fetch,
> -                                        cs, &addr) &&
> -             (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
> -                                         walk, NULL) == HVMTRANS_okay) &&
> -             (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
> -        {
> -            regs->rip += sizeof(sig);
> -            regs->eflags &= ~X86_EFLAGS_RF;
> -
> -            /* Zero the upper 32 bits of %rip if not in 64bit mode. */
> -            if ( !(hvm_long_mode_active(cur) && cs->l) )
> -                regs->rip = (uint32_t)regs->rip;
> +        ASSERT_UNREACHABLE();
> +        goto reinject;
> +    }
>  
> -            add_taint(TAINT_HVM_FEP);
> +    hvm_emulate_init_once(&ctxt, NULL, regs);
>  
> -            should_emulate = true;
> -        }
> -    }
> +    walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
> +            ? PFEC_user_mode : 0) | PFEC_insn_fetch;
>  
> -    if ( !should_emulate )
> +    if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
> +                                    sizeof(sig), hvm_access_insn_fetch,
> +                                    cs, &addr) &&
> +         (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
> +                                     walk, NULL) == HVMTRANS_okay) &&
> +         (memcmp(sig, "\xf\xb" "xen", sizeof(sig)) == 0) )
>      {
> -        hvm_inject_hw_exception(X86_EXC_UD, X86_EVENT_NO_EC);
> -        return;
> +        regs->rip += sizeof(sig);
> +        regs->eflags &= ~X86_EFLAGS_RF;
> +
> +        /* Zero the upper 32 bits of %rip if not in 64bit mode. */
> +        if ( !(hvm_long_mode_active(cur) && cs->l) )
> +            regs->rip = (uint32_t)regs->rip;
> +
> +        add_taint(TAINT_HVM_FEP);
>      }
> +    else
> +        goto reinject;
>  
>      switch ( hvm_emulate_one(&ctxt, VIO_no_completion) )
>      {
>      case X86EMUL_UNHANDLEABLE:
>      case X86EMUL_UNIMPLEMENTED:
> + reinject:

I'm inclined to suggest to indent this the same as the case labels.

Jan

Reply via email to the sender.