No functional change.

Signed-off-by: Alejandro Vallejo <[email protected]>
---
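Note for reviewers (below the fold, so git-am drops it): every conversion
in this patch relies on the X86_VENDOR_* constants being disjoint
single-bit masks, so a test against one vendor or an OR of several
reduces to a single mask check.  The helper itself is not part of this
diff; the following is only a minimal sketch of what the call sites
assume -- its real name, signature, and home (presumably introduced
earlier in this series) may differ:

    /* Sketch only: vendor membership test over single-bit vendor IDs. */
    static inline bool x86_vendor_is(unsigned int vendor, unsigned int mask)
    {
        /*
         * X86_VENDOR_INTEL, X86_VENDOR_AMD, X86_VENDOR_HYGON, ... each
         * occupy a distinct bit, so "vendor & (A | B)" matches either.
         */
        return vendor & mask;
    }

This is also why "x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)" and
"x86_vendor == X86_VENDOR_AMD" both convert to the same helper with no
change in behaviour.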
 xen/arch/x86/acpi/cpu_idle.c        | 19 ++++++++++---------
 xen/arch/x86/acpi/cpufreq/acpi.c    |  2 +-
 xen/arch/x86/acpi/cpufreq/cpufreq.c |  4 ++--
 xen/arch/x86/apic.c                 |  2 +-
 xen/arch/x86/cpu-policy.c           | 10 ++++++----
 xen/arch/x86/cpuid.c                |  4 ++--
 xen/arch/x86/dom0_build.c           |  3 ++-
 xen/arch/x86/domain.c               | 25 +++++++++++--------------
 xen/arch/x86/e820.c                 |  3 ++-
 xen/arch/x86/hvm/hvm.c              |  5 +++--
 xen/arch/x86/hvm/ioreq.c            |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c          |  6 +++---
 xen/arch/x86/include/asm/guest_pt.h |  4 ++--
 xen/arch/x86/irq.c                  |  4 ++--
 xen/arch/x86/pv/emul-priv-op.c      | 24 ++++++++++++------------
 xen/arch/x86/setup.c                |  2 +-
 16 files changed, 61 insertions(+), 58 deletions(-)

diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index d60a07bfd5..1d7f19aadb 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -178,7 +178,7 @@ static void cf_check do_get_hw_residencies(void *arg)
     struct cpuinfo_x86 *c = &current_cpu_data;
     struct hw_residencies *hw_res = arg;
 
-    if ( c->x86_vendor != X86_VENDOR_INTEL || c->x86 != 6 )
+    if ( !x86_vendor_is(c->x86_vendor, X86_VENDOR_INTEL) || c->x86 != 6 )
         return;
 
     switch ( c->x86_model )
@@ -915,8 +915,8 @@ void cf_check acpi_dead_idle(void)
             mwait(cx->address, 0);
         }
     }
-    else if ( (current_cpu_data.x86_vendor &
-               (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
+    else if ( x86_vendor_is(current_cpu_data.x86_vendor,
+                            X86_VENDOR_AMD | X86_VENDOR_HYGON) &&
               cx->entry_method == ACPI_CSTATE_EM_SYSIO )
     {
         /* Intel prefers not to use SYSIO */
@@ -1042,8 +1042,9 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag
     flags->bm_check = 0;
     if ( num_online_cpus() == 1 )
         flags->bm_check = 1;
-    else if ( (c->x86_vendor == X86_VENDOR_INTEL) ||
-              ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 0x15)) )
+    else if ( x86_vendor_is(c->x86_vendor, X86_VENDOR_INTEL) ||
+              (x86_vendor_is(c->x86_vendor,
+                             X86_VENDOR_AMD) && (c->x86 == 0x15)) )
     {
         /*
          * Today all MP CPUs that support C3 share cache.
@@ -1059,7 +1060,7 @@ static void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flag
      * is not required while entering C3 type state on
      * P4, Core and beyond CPUs
      */
-    if ( c->x86_vendor == X86_VENDOR_INTEL &&
+    if ( x86_vendor_is(c->x86_vendor, X86_VENDOR_INTEL) &&
         (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)) )
             flags->bm_control = 0;
 }
@@ -1421,7 +1422,7 @@ static void amd_cpuidle_init(struct acpi_processor_power *power)
     case 0x1a:
     case 0x19:
     case 0x18:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_HYGON )
+        if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_HYGON) )
         {
     default:
             vendor_override = -1;
@@ -1648,8 +1649,8 @@ static int cf_check cpu_callback(
         break;
 
     case CPU_ONLINE:
-        if ( (boot_cpu_data.x86_vendor &
-              (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
+        if ( x86_vendor_is(boot_cpu_data.x86_vendor,
+                           X86_VENDOR_AMD | X86_VENDOR_HYGON) &&
              processor_powers[cpu] )
             amd_cpuidle_init(processor_powers[cpu]);
         break;
diff --git a/xen/arch/x86/acpi/cpufreq/acpi.c b/xen/arch/x86/acpi/cpufreq/acpi.c
index b027459417..b8bfb9fb56 100644
--- a/xen/arch/x86/acpi/cpufreq/acpi.c
+++ b/xen/arch/x86/acpi/cpufreq/acpi.c
@@ -471,7 +471,7 @@ static int cf_check acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
     /* Check for APERF/MPERF support in hardware
      * also check for boost support */
-    if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6)
+    if (x86_vendor_is(c->x86_vendor, X86_VENDOR_INTEL) && c->cpuid_level >= 6)
         on_selected_cpus(cpumask_of(cpu), feature_detect, policy, 1);
 
     /*
diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c
index 89e2b3d167..27770269ba 100644
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -244,8 +244,8 @@ __initcall(cpufreq_driver_late_init);
 int cpufreq_cpu_init(unsigned int cpu)
 {
     /* Currently we only handle Intel, AMD and Hygon processor */
-    if ( boot_cpu_data.x86_vendor &
-         (X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+    if ( x86_vendor_is(boot_cpu_data.x86_vendor,
+                       (X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
         return cpufreq_add_cpu(cpu);
 
     return -EOPNOTSUPP;
diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c
index fb38be7ec3..6e78250a4a 100644
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -406,7 +406,7 @@ void __init init_bsp_APIC(void)
     value |= APIC_SPIV_APIC_ENABLED;
     
     /* This bit is reserved on P4/Xeon and should be cleared */
-    if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15))
+    if (x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15))
         value &= ~APIC_SPIV_FOCUS_DISABLED;
     else
         value |= APIC_SPIV_FOCUS_DISABLED;
diff --git a/xen/arch/x86/cpu-policy.c b/xen/arch/x86/cpu-policy.c
index 1acd7c5124..3f3c95eb82 100644
--- a/xen/arch/x86/cpu-policy.c
+++ b/xen/arch/x86/cpu-policy.c
@@ -759,7 +759,8 @@ static void __init calculate_hvm_max_policy(void)
      * long mode (and init_amd() has cleared it out of host capabilities), but
      * HVM guests are able if running in protected mode.
      */
-    if ( (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
+    if ( x86_vendor_is(boot_cpu_data.x86_vendor,
+                       X86_VENDOR_AMD | X86_VENDOR_HYGON) &&
          raw_cpu_policy.basic.sep )
         __set_bit(X86_FEATURE_SEP, fs);
 
@@ -952,8 +953,9 @@ void recalculate_cpuid_policy(struct domain *d)
     p->basic.max_leaf   = min(p->basic.max_leaf,   max->basic.max_leaf);
     p->feat.max_subleaf = min(p->feat.max_subleaf, max->feat.max_subleaf);
     p->extd.max_leaf    = 0x80000000U | min(p->extd.max_leaf & 0xffff,
-                                            ((p->x86_vendor & (X86_VENDOR_AMD |
-                                                               X86_VENDOR_HYGON))
+                                            (x86_vendor_is(p->x86_vendor,
+                                                           X86_VENDOR_AMD |
+                                                           X86_VENDOR_HYGON)
                                              ? CPUID_GUEST_NR_EXTD_AMD
                                              : CPUID_GUEST_NR_EXTD_INTEL) - 1);
 
@@ -987,7 +989,7 @@ void recalculate_cpuid_policy(struct domain *d)
     if ( is_pv_32bit_domain(d) )
     {
         __clear_bit(X86_FEATURE_LM, max_fs);
-        if ( !(boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+        if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
             __clear_bit(X86_FEATURE_SYSCALL, max_fs);
     }
 
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 5decfad8cd..7eca6ad2f5 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -437,7 +437,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
 
     case 0xa:
         /* TODO: Rework vPMU control in terms of toolstack choices. */
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+        if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
              !vpmu_available(v) )
             *res = EMPTY_LEAF;
         else
@@ -483,7 +483,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
 
     case 0x80000001U:
         /* SYSCALL is hidden outside of long mode on Intel. */
-        if ( p->x86_vendor == X86_VENDOR_INTEL &&
+        if ( x86_vendor_is(p->x86_vendor, X86_VENDOR_INTEL) &&
              is_hvm_domain(d) && !hvm_long_mode_active(v) )
             res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
 
diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index 0b467fd4a4..11cfd6be20 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -572,7 +572,8 @@ int __init dom0_setup_permissions(struct domain *d)
             rc |= iomem_deny_access(d, mfn, mfn);
     }
     /* HyperTransport range. */
-    if ( boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+    if ( x86_vendor_is(boot_cpu_data.x86_vendor,
+                       X86_VENDOR_AMD | X86_VENDOR_HYGON) )
     {
         mfn = paddr_to_pfn(1UL <<
                            (boot_cpu_data.x86 < 0x17 ? 40 : paddr_bits));
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f3e4ae4a4d..3c878c92a5 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -370,7 +370,7 @@ void domain_cpu_policy_changed(struct domain *d)
         {
             uint64_t mask = cpuidmask_defaults._6c;
 
-            if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+            if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) )
                 mask &= (~0ULL << 32) | p->basic.raw[6].c;
 
             d->arch.pv.cpuidmasks->_6c = mask;
@@ -385,8 +385,8 @@ void domain_cpu_policy_changed(struct domain *d)
              * wholesale from the policy, but clamp the features in 7[0].ebx
              * per usual.
              */
-            if ( boot_cpu_data.x86_vendor &
-                 (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+            if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD |
+                                                         X86_VENDOR_HYGON) )
                 mask = (((uint64_t)p->feat.max_subleaf << 32) |
                         ((uint32_t)mask & p->feat._7b0));
 
@@ -398,7 +398,7 @@ void domain_cpu_policy_changed(struct domain *d)
             uint64_t mask = cpuidmask_defaults.Da1;
             uint32_t eax = p->xstate.Da1;
 
-            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+            if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
                 mask &= (~0ULL << 32) | eax;
 
             d->arch.pv.cpuidmasks->Da1 = mask;
@@ -422,17 +422,15 @@ void domain_cpu_policy_changed(struct domain *d)
              * If not emulating AMD or Hygon, clear the duplicated features
              * in e1d.
              */
-            if ( !(p->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+            if ( !x86_vendor_is(p->x86_vendor, X86_VENDOR_AMD |
+                                               X86_VENDOR_HYGON) )
                 edx &= ~CPUID_COMMON_1D_FEATURES;
 
-            switch ( boot_cpu_data.x86_vendor )
-            {
-            case X86_VENDOR_INTEL:
+            if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
                 mask &= ((uint64_t)edx << 32) | ecx;
-                break;
-
-            case X86_VENDOR_AMD:
-            case X86_VENDOR_HYGON:
+            else if ( x86_vendor_is(boot_cpu_data.x86_vendor,
+                                    X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+            {
                 mask &= ((uint64_t)ecx << 32) | edx;
 
                 /*
@@ -443,7 +441,6 @@ void domain_cpu_policy_changed(struct domain *d)
                 edx = cpufeat_mask(X86_FEATURE_APIC);
 
                 mask |= ((uint64_t)ecx << 32) | edx;
-                break;
             }
 
             d->arch.pv.cpuidmasks->e1cd = mask;
@@ -455,7 +452,7 @@ void domain_cpu_policy_changed(struct domain *d)
         cpu_policy_updated(v);
 
         /* If PMU version is zero then the guest doesn't have VPMU */
-        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+        if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) &&
              p->basic.pmu_version == 0 )
             vpmu_destroy(v);
     }
diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c
index ca577c0bde..3cbcd98254 100644
--- a/xen/arch/x86/e820.c
+++ b/xen/arch/x86/e820.c
@@ -426,7 +426,8 @@ static uint64_t __init mtrr_top_of_ram(void)
 
     /* By default we check only Intel systems. */
     if ( e820_mtrr_clip == -1 )
-        e820_mtrr_clip = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
+        e820_mtrr_clip = x86_vendor_is(boot_cpu_data.x86_vendor,
+                                       X86_VENDOR_INTEL);
 
     if ( !e820_mtrr_clip )
         return 0;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4084b610fa..8e4050b8ce 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2668,8 +2668,9 @@ bool hvm_vcpu_virtual_to_linear(
         }
         else if ( last_byte > reg->limit )
             goto out; /* last byte is beyond limit */
-        else if ( last_byte < offset &&
-                  v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD )
+        else if ( x86_vendor_is(v->domain->arch.cpuid->x86_vendor,
+                                X86_VENDOR_AMD) &&
+                  last_byte < offset )
             goto out; /* access wraps */
     }
 
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index a5fa97e149..8bdf52b506 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -286,7 +286,7 @@ bool arch_ioreq_server_get_type_addr(const struct domain *d,
         *addr = ((uint64_t)sbdf.sbdf << 32) | reg;
         /* AMD extended configuration space access? */
         if ( CF8_ADDR_HI(cf8) &&
-             d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
+             x86_vendor_is(d->arch.cpuid->x86_vendor, X86_VENDOR_AMD) &&
              (x86_fam = get_cpu_family(
                  d->arch.cpuid->basic.raw_fms, NULL, NULL)) >= 0x10 &&
              x86_fam < 0x17 )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 6b407226c4..769840569c 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3057,7 +3057,7 @@ static bool __init has_if_pschange_mc(void)
      * IF_PSCHANGE_MC is only known to affect Intel Family 6 processors at
      * this time.
      */
-    if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
          boot_cpu_data.x86 != 6 )
         return false;
 
@@ -3409,7 +3409,7 @@ static void __init lbr_tsx_fixup_check(void)
      * fixed up as well.
      */
     if ( cpu_has_hle || cpu_has_rtm ||
-         boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+         !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
          boot_cpu_data.x86 != 6 )
         return;
 
@@ -3454,7 +3454,7 @@ static void __init ler_to_fixup_check(void)
      * that are not equal to bit[47].  Attempting to context switch this value
      * may cause a #GP.  Software should sign extend the MSR.
      */
-    if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
          boot_cpu_data.x86 != 6 )
         return;
 
diff --git a/xen/arch/x86/include/asm/guest_pt.h b/xen/arch/x86/include/asm/guest_pt.h
index 21473f9bbc..a44909d35d 100644
--- a/xen/arch/x86/include/asm/guest_pt.h
+++ b/xen/arch/x86/include/asm/guest_pt.h
@@ -314,8 +314,8 @@ static always_inline bool guest_l4e_rsvd_bits(const struct vcpu *v,
                                               guest_l4e_t l4e)
 {
     return l4e.l4 & (guest_rsvd_bits(v) | GUEST_L4_PAGETABLE_RSVD |
-                     ((v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD)
-                      ? _PAGE_GLOBAL : 0));
+                     (x86_vendor_is(v->domain->arch.cpuid->x86_vendor,
+                                    X86_VENDOR_AMD) ? _PAGE_GLOBAL : 0));
 }
 #endif /* GUEST_PAGING_LEVELS >= 4 */
 #endif /* GUEST_PAGING_LEVELS >= 3 */
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 92b8604dc8..2490331ec3 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -2010,8 +2010,8 @@ void do_IRQ(struct cpu_user_regs *regs)
                      * interrupts have been delivered to CPUs
                      * different than the BSP.
                      */
-                    (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD |
-                                                 X86_VENDOR_HYGON))) &&
+                    x86_vendor_is(boot_cpu_data.x86_vendor,
+                                  X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
                    bogus_8259A_irq(vector - FIRST_LEGACY_VECTOR)) )
             {
                 printk("CPU%u: No irq handler for vector %02x (IRQ %d%s)\n",
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 225d4cff03..e69450f949 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -244,7 +244,7 @@ static bool pci_cfg_ok(struct domain *currd, unsigned int start,
     start |= CF8_ADDR_LO(currd->arch.pci_cf8);
     /* AMD extended configuration space access? */
     if ( CF8_ADDR_HI(currd->arch.pci_cf8) &&
-         boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+         x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) &&
          boot_cpu_data.x86 >= 0x10 && boot_cpu_data.x86 < 0x17 )
     {
         uint64_t msr_val;
@@ -868,7 +868,7 @@ static uint64_t guest_efer(const struct domain *d)
      */
     if ( is_pv_32bit_domain(d) )
         val &= ~(EFER_LME | EFER_LMA |
-                 (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
+                 (x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL)
                   ? EFER_SCE : 0));
     return val;
 }
@@ -943,7 +943,7 @@ static int cf_check read_msr(
     case MSR_K8_PSTATE5:
     case MSR_K8_PSTATE6:
     case MSR_K8_PSTATE7:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
+        if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) )
             break;
         if ( unlikely(is_cpufreq_controller(currd)) )
             goto normal;
@@ -951,7 +951,7 @@ static int cf_check read_msr(
         return X86EMUL_OKAY;
 
     case MSR_FAM10H_MMIO_CONF_BASE:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
+        if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) ||
              boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 >= 0x17 )
             break;
         /* fall through */
@@ -976,7 +976,7 @@ static int cf_check read_msr(
     case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL(3):
     case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2:
     case MSR_CORE_PERF_FIXED_CTR_CTRL ... MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+        if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
         {
             vpmu_msr = true;
             /* fall through */
@@ -1103,7 +1103,7 @@ static int cf_check write_msr(
         break;
 
     case MSR_FAM10H_MMIO_CONF_BASE:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
+        if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) ||
              boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 >= 0x17 )
             break;
         if ( !is_hwdom_pinned_vcpu(curr) )
@@ -1132,8 +1132,8 @@ static int cf_check write_msr(
 
     case MSR_IA32_MPERF:
     case MSR_IA32_APERF:
-        if ( !(boot_cpu_data.x86_vendor &
-               (X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+        if ( !x86_vendor_is(boot_cpu_data.x86_vendor,
+                 X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON) )
             break;
         if ( likely(!is_cpufreq_controller(currd)) ||
              wrmsr_safe(reg, val) == 0 )
@@ -1142,7 +1142,7 @@ static int cf_check write_msr(
 
     case MSR_IA32_THERM_CONTROL:
     case MSR_IA32_ENERGY_PERF_BIAS:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
+        if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
             break;
         if ( !is_hwdom_pinned_vcpu(curr) || wrmsr_safe(reg, val) == 0 )
             return X86EMUL_OKAY;
@@ -1152,13 +1152,13 @@ static int cf_check write_msr(
     case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL(3):
     case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2:
     case MSR_CORE_PERF_FIXED_CTR_CTRL ... MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+        if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
         {
             vpmu_msr = true;
     case MSR_AMD_FAM15H_EVNTSEL0 ... MSR_AMD_FAM15H_PERFCTR5:
     case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
-            if ( vpmu_msr || (boot_cpu_data.x86_vendor &
-                              (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+            if ( vpmu_msr || x86_vendor_is(boot_cpu_data.x86_vendor,
+                                           X86_VENDOR_AMD | X86_VENDOR_HYGON) )
             {
                 if ( (vpmu_mode & XENPMU_MODE_ALL) &&
                      !is_hardware_domain(currd) )
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 872a8c63f9..0eca058850 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1384,7 +1384,7 @@ void asmlinkage __init noreturn __start_xen(void)
          * supervisor shadow stacks are now safe to use.
          */
         bool cpu_has_bug_shstk_fracture =
-            boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+            x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) &&
             !boot_cpu_has(X86_FEATURE_CET_SSS);
 
         /*
-- 
2.43.0

