This enables each branch to be optimised out when the vendor isn't
enabled in Kconfig, and combines N checks into 1 for the strict
fallthrough cases.

Plus, the diffstat looks fantastic and we save tons of vertical space.
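
For reference, a minimal sketch of how such a helper can behave, under
two assumptions: the X86_VENDOR_* constants are single bits (the ORed
arguments throughout this patch rely on that), and the Kconfig-to-vendor
grouping shown below is illustrative, mirroring the #ifdef blocks being
removed, rather than lifted from the actual helper:

    static inline bool x86_vendor_is(unsigned int vendor, unsigned int mask)
    {
        /*
         * Vendors compiled out via Kconfig are masked away at build
         * time, so any branch guarded by one of them folds to
         * "if ( 0 )" and is optimised out.  NB: this grouping is a
         * guess for illustration only.
         */
        unsigned int enabled =
            (IS_ENABLED(CONFIG_INTEL) ? X86_VENDOR_INTEL |
                                        X86_VENDOR_CENTAUR |
                                        X86_VENDOR_SHANGHAI : 0) |
            (IS_ENABLED(CONFIG_AMD)   ? X86_VENDOR_AMD |
                                        X86_VENDOR_HYGON : 0);

        return vendor & mask & enabled;
    }

With bit-encoded vendors, x86_vendor_is(v, X86_VENDOR_AMD | X86_VENDOR_HYGON)
is the "N checks into 1" case: a single AND replaces two comparisons.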

Signed-off-by: Alejandro Vallejo <[email protected]>
---
 xen/arch/x86/acpi/cpufreq/cpufreq.c | 28 ++++++------------
 xen/arch/x86/alternative.c          | 30 ++++++++-----------
 xen/arch/x86/cpu-policy.c           | 31 ++++++++-----------
 xen/arch/x86/cpu/mcheck/mce.c       | 27 +++--------------
 xen/arch/x86/cpu/mcheck/mce.h       | 20 ++++++-------
 xen/arch/x86/cpu/mcheck/non-fatal.c | 20 ++++---------
 xen/arch/x86/cpu/mcheck/vmce.c      | 46 +++++++----------------------
 xen/arch/x86/domain.c               | 12 ++++----
 xen/arch/x86/guest/xen/xen.c        | 19 +++++-------
 xen/arch/x86/nmi.c                  | 18 ++++-------
 xen/arch/x86/traps-setup.c          | 18 +++++------
 11 files changed, 87 insertions(+), 182 deletions(-)

diff --git a/xen/arch/x86/acpi/cpufreq/cpufreq.c b/xen/arch/x86/acpi/cpufreq/cpufreq.c
index 94e8e11c15..89e2b3d167 100644
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c
@@ -135,20 +135,17 @@ static int __init cf_check cpufreq_driver_init(void)
 
         ret = -ENOENT;
 
-        switch ( boot_cpu_data.x86_vendor )
+        if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
         {
-        case X86_VENDOR_INTEL:
             for ( i = 0; i < cpufreq_xen_cnt; i++ )
             {
                 switch ( cpufreq_xen_opts[i] )
                 {
                 case CPUFREQ_xen:
-                    ret = IS_ENABLED(CONFIG_INTEL) ?
-                          acpi_cpufreq_register() : -ENODEV;
+                    ret = acpi_cpufreq_register();
                     break;
                 case CPUFREQ_hwp:
-                    ret = IS_ENABLED(CONFIG_INTEL) ?
-                          hwp_register_driver() : -ENODEV;
+                    ret = hwp_register_driver();
                     break;
                 case CPUFREQ_none:
                     ret = 0;
@@ -163,11 +160,10 @@ static int __init cf_check cpufreq_driver_init(void)
                 if ( !ret || ret == -EBUSY )
                     break;
             }
-            break;
-
-        case X86_VENDOR_AMD:
-        case X86_VENDOR_HYGON:
-#ifdef CONFIG_AMD
+        }
+        else if ( x86_vendor_is(boot_cpu_data.x86_vendor,
+                                X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+        {
             for ( i = 0; i < cpufreq_xen_cnt; i++ )
             {
                 switch ( cpufreq_xen_opts[i] )
@@ -193,15 +189,9 @@ static int __init cf_check cpufreq_driver_init(void)
                 if ( !ret || ret == -EBUSY )
                     break;
             }
-#else
-            ret = -ENODEV;
-#endif /* CONFIG_AMD */
-            break;
-
-        default:
-            printk(XENLOG_ERR "Cpufreq: unsupported x86 vendor\n");
-            break;
         }
+        else
+            printk(XENLOG_ERR "Cpufreq: unsupported x86 vendor\n");
 
         /*
          * After successful cpufreq driver registeration, XEN_PROCESSOR_PM_CPPC
diff --git a/xen/arch/x86/alternative.c b/xen/arch/x86/alternative.c
index 9f844241bc..071871c242 100644
--- a/xen/arch/x86/alternative.c
+++ b/xen/arch/x86/alternative.c
@@ -89,32 +89,28 @@ static bool init_or_livepatch_read_mostly toolchain_nops_are_ideal;
 
 static void __init arch_init_ideal_nops(void)
 {
-    switch ( boot_cpu_data.x86_vendor )
+    if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
     {
-    case X86_VENDOR_INTEL:
         /*
          * Due to a decoder implementation quirk, some specific Intel CPUs
          * actually perform better with the "k8_nops" than with the SDM-
          * recommended NOPs.
          */
-        if ( boot_cpu_data.x86 != 6 )
-            break;
-
-        switch ( boot_cpu_data.x86_model )
+        if ( boot_cpu_data.x86 == 6 )
         {
-        case 0x0f ... 0x1b:
-        case 0x1d ... 0x25:
-        case 0x28 ... 0x2f:
-            ideal_nops = k8_nops;
-            break;
+            switch ( boot_cpu_data.x86_model )
+            {
+            case 0x0f ... 0x1b:
+            case 0x1d ... 0x25:
+            case 0x28 ... 0x2f:
+                ideal_nops = k8_nops;
+                break;
+            }
         }
-        break;
-
-    case X86_VENDOR_AMD:
-        if ( boot_cpu_data.x86 <= 0xf )
-            ideal_nops = k8_nops;
-        break;
     }
+    else if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) &&
+              (boot_cpu_data.x86 <= 0xf) )
+        ideal_nops = k8_nops;
 
 #ifdef HAVE_AS_NOPS_DIRECTIVE
     if ( memcmp(ideal_nops[ASM_NOP_MAX], toolchain_nops, ASM_NOP_MAX) == 0 )
diff --git a/xen/arch/x86/cpu-policy.c b/xen/arch/x86/cpu-policy.c
index 0a7ef15f72..1acd7c5124 100644
--- a/xen/arch/x86/cpu-policy.c
+++ b/xen/arch/x86/cpu-policy.c
@@ -279,9 +279,8 @@ static void recalculate_misc(struct cpu_policy *p)
 
     p->extd.raw[0x8].d = 0;
 
-    switch ( p->x86_vendor )
+    if ( x86_vendor_is(p->x86_vendor, X86_VENDOR_INTEL) )
     {
-    case X86_VENDOR_INTEL:
         p->basic.l2_nr_queries = 1; /* Fixed to 1 query. */
         p->basic.raw[0x3] = EMPTY_LEAF; /* PSN - always hidden. */
         p->basic.raw[0x9] = EMPTY_LEAF; /* DCA - always hidden. */
@@ -297,10 +296,9 @@ static void recalculate_misc(struct cpu_policy *p)
 
         p->extd.raw[0x8].a &= 0x0000ffff;
         p->extd.raw[0x8].c = 0;
-        break;
-
-    case X86_VENDOR_AMD:
-    case X86_VENDOR_HYGON:
+    }
+    else if ( x86_vendor_is(p->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+    {
         zero_leaves(p->basic.raw, 0x2, 0x3);
         memset(p->cache.raw, 0, sizeof(p->cache.raw));
         zero_leaves(p->basic.raw, 0x9, 0xa);
@@ -331,7 +329,6 @@ static void recalculate_misc(struct cpu_policy *p)
         p->extd.raw[0x20] = EMPTY_LEAF; /* Platform QoS */
         p->extd.raw[0x21].b = 0;
         p->extd.raw[0x21].d = 0;
-        break;
     }
 }
 
@@ -416,9 +413,8 @@ static void __init guest_common_default_leaves(struct cpu_policy *p)
 
 static void __init guest_common_max_feature_adjustments(uint32_t *fs)
 {
-    switch ( boot_cpu_data.vendor )
+    if ( x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_INTEL) )
     {
-    case X86_VENDOR_INTEL:
         /*
          * MSR_ARCH_CAPS is just feature data, and we can offer it to guests
          * unconditionally, although limit it to Intel systems as it is highly
@@ -477,9 +473,9 @@ static void __init guest_common_max_feature_adjustments(uint32_t *fs)
          */
         if ( test_bit(X86_FEATURE_RTM, fs) )
             __set_bit(X86_FEATURE_RTM_ALWAYS_ABORT, fs);
-        break;
-
-    case X86_VENDOR_AMD:
+    }
+    else if ( x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD) )
+    {
         /*
          * This bit indicates that the VERW instruction may have gained
          * scrubbing side effects.  With pooling, it means "you might migrate
@@ -488,7 +484,6 @@ static void __init guest_common_max_feature_adjustments(uint32_t *fs)
          * has been around since the 286.
          */
         __set_bit(X86_FEATURE_VERW_CLEAR, fs);
-        break;
     }
 
     /*
@@ -510,9 +505,8 @@ static void __init guest_common_default_feature_adjustments(uint32_t *fs)
 
 static void __init guest_common_default_feature_adjustments(uint32_t *fs)
 {
-    switch ( boot_cpu_data.vendor )
+    if ( x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_INTEL) )
     {
-    case X86_VENDOR_INTEL:
         /*
          * IvyBridge client parts suffer from leakage of RDRAND data due to SRBDS
          * (XSA-320 / CVE-2020-0543), and won't be receiving microcode to
@@ -570,9 +564,9 @@ static void __init guest_common_default_feature_adjustments(uint32_t *fs)
             __clear_bit(X86_FEATURE_RTM, fs);
             __set_bit(X86_FEATURE_RTM_ALWAYS_ABORT, fs);
         }
-        break;
-
-    case X86_VENDOR_AMD:
+    }
+    else if ( x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD) )
+    {
         /*
          * This bit indicate that the VERW instruction may have gained
          * scrubbing side effects.  The max policy has it set for migration
@@ -581,7 +575,6 @@ static void __init guest_common_default_feature_adjustments(uint32_t *fs)
          */
         if ( !cpu_has_verw_clear )
             __clear_bit(X86_FEATURE_VERW_CLEAR, fs);
-        break;
     }
 
     /*
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 9277781bff..9bef1da385 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -766,30 +766,11 @@ void mcheck_init(struct cpuinfo_x86 *c, bool bsp)
     else if ( cpu_bank_alloc(cpu) )
         panic("Insufficient memory for MCE bank allocations\n");
 
-    switch ( c->x86_vendor )
-    {
-#ifdef CONFIG_AMD
-    case X86_VENDOR_AMD:
-    case X86_VENDOR_HYGON:
+    if ( x86_vendor_is(c->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
         inited = amd_mcheck_init(c, bsp);
-        break;
-#endif
-
-#ifdef CONFIG_INTEL
-    case X86_VENDOR_INTEL:
-        switch ( c->x86 )
-        {
-        case 6:
-        case 15:
-            inited = intel_mcheck_init(c, bsp);
-            break;
-        }
-        break;
-#endif
-
-    default:
-        break;
-    }
+    else if ( x86_vendor_is(c->x86_vendor, X86_VENDOR_INTEL) &&
+              (c->x86 == 6 || c->x86 == 15) )
+        inited = intel_mcheck_init(c, bsp);
 
     show_mca_info(inited, c);
     if ( inited == mcheck_none || inited == mcheck_unset )
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index 920b075355..14261f925b 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -137,28 +137,26 @@ void x86_mcinfo_dump(struct mc_info *mi);
 
 static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr)
 {
-    switch (boot_cpu_data.x86_vendor) {
-    case X86_VENDOR_INTEL:
+    if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL   |
+                                                 X86_VENDOR_CENTAUR |
+                                                 X86_VENDOR_SHANGHAI) )
+    {
         if (msr >= MSR_IA32_MC0_CTL2 &&
             msr < MSR_IA32_MCx_CTL2(v->arch.vmce.mcg_cap & MCG_CAP_COUNT) )
             return 1;
-        fallthrough;
-
-    case X86_VENDOR_CENTAUR:
-    case X86_VENDOR_SHANGHAI:
-        if (msr == MSR_P5_MC_ADDR || msr == MSR_P5_MC_TYPE)
+        if ( msr == MSR_P5_MC_ADDR || msr == MSR_P5_MC_TYPE )
             return 1;
-        break;
-
-    case X86_VENDOR_AMD:
+    }
+    else if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) )
+    {
         switch (msr) {
         case MSR_F10_MC4_MISC1:
         case MSR_F10_MC4_MISC2:
         case MSR_F10_MC4_MISC3:
             return 1;
         }
-        break;
     }
+
     return 0;
 }
 
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index a9ee9bb94f..db0ddc5b7b 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -23,25 +23,15 @@ static int __init cf_check init_nonfatal_mce_checker(void)
        /*
         * Check for non-fatal errors every MCE_RATE s
         */
-       switch (c->x86_vendor) {
-#ifdef CONFIG_AMD
-       case X86_VENDOR_AMD:
-       case X86_VENDOR_HYGON:
+       if ( x86_vendor_is(c->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
                /* Assume we are on K8 or newer AMD or Hygon CPU here */
                amd_nonfatal_mcheck_init(c);
-               break;
-#endif
-
-#ifdef CONFIG_INTEL
-       case X86_VENDOR_INTEL:
+       else if ( x86_vendor_is(c->x86_vendor, X86_VENDOR_INTEL) )
                intel_nonfatal_mcheck_init(c);
-               break;
-#endif
-
-       default:
-               /* unhandled vendor isn't really an error */
+       else
+               /* unhandled vendor isn't really an error */
                return 0;
-       }
+
        printk(KERN_INFO "mcheck_poll: Machine check polling timer started.\n");
        return 0;
 }
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 1a7e92506a..dd1ccecfe5 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -136,27 +136,14 @@ static int bank_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
         break;
 
     default:
-        switch ( boot_cpu_data.x86_vendor )
-        {
-#ifdef CONFIG_INTEL
-        case X86_VENDOR_CENTAUR:
-        case X86_VENDOR_SHANGHAI:
-        case X86_VENDOR_INTEL:
+        ret = 0;
+        if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_CENTAUR  |
+                                                     X86_VENDOR_SHANGHAI |
+                                                     X86_VENDOR_INTEL) )
             ret = vmce_intel_rdmsr(v, msr, val);
-            break;
-#endif
-
-#ifdef CONFIG_AMD
-        case X86_VENDOR_AMD:
-        case X86_VENDOR_HYGON:
+        else if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD |
+                                                          X86_VENDOR_HYGON) )
             ret = vmce_amd_rdmsr(v, msr, val);
-            break;
-#endif
-
-        default:
-            ret = 0;
-            break;
-        }
         break;
     }
 
@@ -273,25 +260,12 @@ static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
         break;
 
     default:
-        switch ( boot_cpu_data.x86_vendor )
-        {
-#ifdef CONFIG_INTEL
-        case X86_VENDOR_INTEL:
+        ret = 0;
+        if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
             ret = vmce_intel_wrmsr(v, msr, val);
-            break;
-#endif
-
-#ifdef CONFIG_AMD
-        case X86_VENDOR_AMD:
-        case X86_VENDOR_HYGON:
+        else if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD |
+                                                          X86_VENDOR_HYGON) )
             ret = vmce_amd_wrmsr(v, msr, val);
-            break;
-#endif
-
-        default:
-            ret = 0;
-            break;
-        }
         break;
     }
 
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 3a21e035f4..f3e4ae4a4d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -319,9 +319,8 @@ void domain_cpu_policy_changed(struct domain *d)
             if ( cpu_has_htt )
                 edx |= cpufeat_mask(X86_FEATURE_HTT);
 
-            switch ( boot_cpu_data.x86_vendor )
+            if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
             {
-            case X86_VENDOR_INTEL:
                 /*
                  * Intel masking MSRs are documented as AND masks.
                  * Experimentally, they are applied after OSXSAVE and APIC
@@ -336,10 +335,10 @@ void domain_cpu_policy_changed(struct domain *d)
                 edx = cpufeat_mask(X86_FEATURE_APIC);
 
                 mask |= ((uint64_t)edx << 32) | ecx;
-                break;
-
-            case X86_VENDOR_AMD:
-            case X86_VENDOR_HYGON:
+            }
+            else if ( x86_vendor_is(boot_cpu_data.x86_vendor,
+                                    X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+            {
                 mask &= ((uint64_t)ecx << 32) | edx;
 
                 /*
@@ -362,7 +361,6 @@ void domain_cpu_policy_changed(struct domain *d)
                     ecx |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
 
                 mask |= ((uint64_t)ecx << 32) | edx;
-                break;
             }
 
             d->arch.pv.cpuidmasks->_1cd = mask;
diff --git a/xen/arch/x86/guest/xen/xen.c b/xen/arch/x86/guest/xen/xen.c
index 77a3a8742a..7802b5f506 100644
--- a/xen/arch/x86/guest/xen/xen.c
+++ b/xen/arch/x86/guest/xen/xen.c
@@ -59,23 +59,18 @@ void asmlinkage __init early_hypercall_setup(void)
         boot_cpu_data.x86_vendor = x86_cpuid_lookup_vendor(ebx, ecx, edx);
     }
 
-    switch ( boot_cpu_data.x86_vendor )
+    if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL   |
+                                                 X86_VENDOR_CENTAUR |
+                                                 X86_VENDOR_SHANGHAI) )
     {
-    case X86_VENDOR_INTEL:
-    case X86_VENDOR_CENTAUR:
-    case X86_VENDOR_SHANGHAI:
         early_hypercall_insn = 0;
         setup_force_cpu_cap(X86_FEATURE_USE_VMCALL);
-        break;
-
-    case X86_VENDOR_AMD:
-    case X86_VENDOR_HYGON:
+    }
+    else if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD |
+                                                      X86_VENDOR_HYGON) )
         early_hypercall_insn = 1;
-        break;
-
-    default:
+    else
         BUG();
-    }
 }
 
 static void __init find_xen_leaves(void)
diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c
index a0c9194ff0..a35e7109fe 100644
--- a/xen/arch/x86/nmi.c
+++ b/xen/arch/x86/nmi.c
@@ -216,11 +216,10 @@ void disable_lapic_nmi_watchdog(void)
 {
     if (nmi_active <= 0)
         return;
-    switch (boot_cpu_data.x86_vendor) {
-    case X86_VENDOR_AMD:
+    if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) )
         wrmsrns(MSR_K7_EVNTSEL0, 0);
-        break;
-    case X86_VENDOR_INTEL:
+    else if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
+    {
         switch (boot_cpu_data.x86) {
         case 6:
             wrmsrns(MSR_P6_EVNTSEL(0), 0);
@@ -230,7 +229,6 @@ void disable_lapic_nmi_watchdog(void)
             wrmsr(MSR_P4_CRU_ESCR0, 0);
             break;
         }
-        break;
     }
     nmi_active = -1;
     /* tell do_nmi() and others that we're not active any more */
@@ -387,13 +385,10 @@ void setup_apic_nmi_watchdog(void)
     if ( nmi_watchdog == NMI_NONE )
         return;
 
-    switch ( boot_cpu_data.x86_vendor )
-    {
-    case X86_VENDOR_AMD:
+    if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) )
         setup_k7_watchdog();
-        break;
-
-    case X86_VENDOR_INTEL:
+    else if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
+    {
         switch (boot_cpu_data.x86) {
         case 6:
             setup_p6_watchdog((boot_cpu_data.x86_model < 14) 
@@ -404,7 +399,6 @@ void setup_apic_nmi_watchdog(void)
             setup_p4_watchdog();
             break;
         }
-        break;
     }
 
     if ( nmi_perfctr_msr == 0 )
diff --git a/xen/arch/x86/traps-setup.c b/xen/arch/x86/traps-setup.c
index d77be8f839..83070b050a 100644
--- a/xen/arch/x86/traps-setup.c
+++ b/xen/arch/x86/traps-setup.c
@@ -243,19 +243,15 @@ static void __init init_ler(void)
      * Intel Pentium 4 is the only known CPU to not use the architectural MSR
      * indicies.
      */
-    switch ( boot_cpu_data.x86_vendor )
+    if ( x86_vendor_is(boot_cpu_data.x86_vendor,
+                       X86_VENDOR_INTEL | X86_VENDOR_AMD | X86_VENDOR_HYGON) )
     {
-    case X86_VENDOR_INTEL:
-        if ( boot_cpu_data.x86 == 0xf )
-        {
+        if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) &&
+             (boot_cpu_data.x86 == 0xf) )
             msr = MSR_P4_LER_FROM_LIP;
-            break;
-        }
-        fallthrough;
-    case X86_VENDOR_AMD:
-    case X86_VENDOR_HYGON:
-        msr = MSR_IA32_LASTINTFROMIP;
-        break;
+        else
+            /* Everything else uses the architectural MSRs. */
+            msr = MSR_IA32_LASTINTFROMIP;
     }
 
     if ( msr == 0 )
-- 
2.43.0

