Hook up the new VM exit codes and handle guest uses of the insns.

Signed-off-by: Jan Beulich <[email protected]>
---
v9: New.
---
The lack of an enable bit is concerning; at least for the nested case
that's a security issue afaict (when L0 isn't aware of the insns, or more
specifically the exit codes).
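
For reference, both new exit reasons share the URDMSR/UWRMSR code paths:
the exit qualification supplies the MSR index (i.e. the instruction's
immediate), while the GPR operand is recovered via msr_imm_gpr(),
introduced earlier in the series (presumably decoding the VM-exit
instruction information). A condensed sketch of the read path with the
error handling elided; illustrative only, the hunks below are
authoritative:

    case EXIT_REASON_RDMSR_IMM:
    {
        uint64_t msr_content = 0;

        /* Exit qualification carries the MSR index, i.e. the immediate. */
        __vmread(EXIT_QUALIFICATION, &exit_qualification);

        if ( hvm_msr_read_intercept(exit_qualification,
                                    &msr_content) == X86EMUL_OKAY )
        {
            /* The destination GPR is identified via msr_imm_gpr(). */
            *decode_gpr(regs, msr_imm_gpr()) = msr_content;
            update_guest_eip();
        }
        break;
    }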

--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -352,7 +352,7 @@ XEN_CPUFEATURE(MCDT_NO,            13*32
 XEN_CPUFEATURE(UC_LOCK_DIS,        13*32+ 6) /*   UC-lock disable */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1.ecx, word 14 */
-XEN_CPUFEATURE(MSR_IMM,            14*32+ 5) /*   RDMSR/WRMSRNS with immediate operand */
+XEN_CPUFEATURE(MSR_IMM,            14*32+ 5) /*s  RDMSR/WRMSRNS with immediate operand */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1.edx, word 15 */
 XEN_CPUFEATURE(AVX_VNNI_INT8,      15*32+ 4) /*A  AVX-VNNI-INT8 Instructions */
--- a/xen/arch/x86/cpu-policy.c
+++ b/xen/arch/x86/cpu-policy.c
@@ -823,10 +823,11 @@ static void __init calculate_hvm_max_pol
         __clear_bit(X86_FEATURE_PKS, fs);
 
         /*
-         * Don't expose USER-MSR until it is known how (if at all) it is
-         * virtualized on SVM.
+         * Don't expose USER-MSR and MSR-IMM until it is known how (if at all)
+         * they are virtualized on SVM.
          */
         __clear_bit(X86_FEATURE_USER_MSR, fs);
+        __clear_bit(X86_FEATURE_MSR_IMM, fs);
     }
 
     if ( !cpu_has_vmx_msrlist )
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -453,7 +453,7 @@ void domain_cpu_policy_changed(struct do
     }
 
     /* Nested doesn't have the necessary processing, yet. */
-    if ( nestedhvm_enabled(d) && p->feat.user_msr )
+    if ( nestedhvm_enabled(d) && (p->feat.user_msr || p->feat.msr_imm) )
         return /* -EINVAL */;
 
     for_each_vcpu ( d, v )
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -4762,6 +4762,7 @@ void asmlinkage vmx_vmexit_handler(struc
         break;
 
     case EXIT_REASON_URDMSR:
+    case EXIT_REASON_RDMSR_IMM:
     {
         uint64_t msr_content = 0;
 
@@ -4770,7 +4771,7 @@ void asmlinkage vmx_vmexit_handler(struc
         {
         case X86EMUL_OKAY:
             *decode_gpr(regs, msr_imm_gpr()) = msr_content;
-            update_guest_eip(); /* Safe: URDMSR */
+            update_guest_eip(); /* Safe: URDMSR / RDMSR <imm> */
             break;
 
         case X86EMUL_EXCEPTION:
@@ -4781,13 +4782,14 @@ void asmlinkage vmx_vmexit_handler(struc
     }
 
     case EXIT_REASON_UWRMSR:
+    case EXIT_REASON_WRMSRNS_IMM:
         __vmread(EXIT_QUALIFICATION, &exit_qualification);
         switch ( hvm_msr_write_intercept(exit_qualification,
                                          *decode_gpr(regs, msr_imm_gpr()),
                                          true) )
         {
         case X86EMUL_OKAY:
-            update_guest_eip(); /* Safe: UWRMSR */
+            update_guest_eip(); /* Safe: UWRMSR / WRMSRNS <imm> */
             break;
 
         case X86EMUL_EXCEPTION:
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -205,6 +205,8 @@ static inline void pi_clear_sn(struct pi
 #define EXIT_REASON_WRMSRLIST           79
 #define EXIT_REASON_URDMSR              80
 #define EXIT_REASON_UWRMSR              81
+#define EXIT_REASON_RDMSR_IMM           84
+#define EXIT_REASON_WRMSRNS_IMM         85
 /* Remember to also update VMX_PERF_EXIT_REASON_SIZE! */
 
 /*
--- a/xen/arch/x86/include/asm/perfc_defn.h
+++ b/xen/arch/x86/include/asm/perfc_defn.h
@@ -6,7 +6,7 @@ PERFCOUNTER_ARRAY(exceptions,
 
 #ifdef CONFIG_HVM
 
-#define VMX_PERF_EXIT_REASON_SIZE 82
+#define VMX_PERF_EXIT_REASON_SIZE 86
 #define VMEXIT_NPF_PERFC 143
 #define SVM_PERF_EXIT_REASON_SIZE (VMEXIT_NPF_PERFC + 1)
 PERFCOUNTER_ARRAY(vmexits,              "vmexits",
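
Note: VMX_PERF_EXIT_REASON_SIZE is the number of slots in the vmexits
counter array, which is indexed by the (0-based) exit reason, so it has
to be one larger than the highest handled reason:
EXIT_REASON_WRMSRNS_IMM (85) + 1 == 86. A compile-time check along these
lines could encode that relationship (hypothetical, not part of the
patch):

    /* Fires if a newly added exit reason outgrows the perf counter array. */
    BUILD_BUG_ON(EXIT_REASON_WRMSRNS_IMM >= VMX_PERF_EXIT_REASON_SIZE);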

