In order to prepare for some MSR access function reorg work, switch
most users of native_{read|write}_msr[_safe]() to the more generic
rdmsr*()/wrmsr*() variants.

For now this will have some intermediate performance impact with
paravirtualization configured when running on bare metal, but this is a
prereq change for the planned direct inlining of the rdmsr/wrmsr
instructions with this configuration.

The main reason for this switch is the planned move of the MSR trace
function invocation from the native_*() functions to the generic
rdmsr*()/wrmsr*() variants. Without this switch the users of the
native_*() functions would lose the related tracing entries.

Note that the Xen related MSR access functions will not be switched, as
these will be handled after the move of the trace hooks.

Signed-off-by: Juergen Gross <[email protected]>
Acked-by: Sean Christopherson <[email protected]>
Acked-by: Wei Liu <[email protected]>
Reviewed-by: H. Peter Anvin (Intel) <[email protected]>
---
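Note for reviewers (not part of the patch): a minimal sketch of the
calling-convention change this conversion applies, using MSR_EFER as an
example; the two wrapper function names below are made up for
illustration only.

	#include <asm/msr.h>
	#include <asm/msr-index.h>

	/* Old style: native_read_msr() returns the MSR value directly. */
	static u64 read_efer_native(void)
	{
		return native_read_msr(MSR_EFER);
	}

	/*
	 * New style: rdmsrq() stores the 64-bit value into its second
	 * argument and goes through the generic MSR access path, which is
	 * paravirt-aware and the planned home of the MSR tracepoints.
	 */
	static u64 read_efer_generic(void)
	{
		u64 val;

		rdmsrq(MSR_EFER, val);
		return val;
	}
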
 arch/x86/hyperv/ivm.c          |  2 +-
 arch/x86/kernel/cpu/mshyperv.c |  7 +++++--
 arch/x86/kernel/kvmclock.c     |  2 +-
 arch/x86/kvm/svm/svm.c         | 16 ++++++++--------
 arch/x86/xen/pmu.c             |  4 ++--
 5 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 651771534cae..1b2222036a0b 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -327,7 +327,7 @@ int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu)
 	asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
 	hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
 
-	vmsa->efer = native_read_msr(MSR_EFER);
+	rdmsrq(MSR_EFER, vmsa->efer);
 
 	vmsa->cr4 = native_read_cr4();
 	vmsa->cr3 = __native_read_cr3();
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 579fb2c64cfd..9bebb1a1ebee 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -111,9 +111,12 @@ void hv_para_set_sint_proxy(bool enable)
  */
 u64 hv_para_get_synic_register(unsigned int reg)
 {
+	u64 val;
+
 	if (WARN_ON(!ms_hyperv.paravisor_present || !hv_is_synic_msr(reg)))
 		return ~0ULL;
-	return native_read_msr(reg);
+	rdmsrq(reg, val);
+	return val;
 }
 
 /*
@@ -123,7 +126,7 @@ void hv_para_set_synic_register(unsigned int reg, u64 val)
 {
 	if (WARN_ON(!ms_hyperv.paravisor_present || !hv_is_synic_msr(reg)))
 		return;
-	native_write_msr(reg, val);
+	wrmsrq(reg, val);
 }
 
 u64 hv_get_msr(unsigned int reg)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index b5991d53fc0e..1002bdd45c0f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -197,7 +197,7 @@ static void kvm_setup_secondary_clock(void)
 void kvmclock_disable(void)
 {
 	if (msr_kvm_system_time)
-		native_write_msr(msr_kvm_system_time, 0);
+		wrmsrq(msr_kvm_system_time, 0);
 }
 
 static void __init kvmclock_init_mem(void)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8f8bc863e214..1c0e7cae9e49 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -389,12 +389,12 @@ static void svm_init_erratum_383(void)
 		return;
 
 	/* Use _safe variants to not break nested virtualization */
-	if (native_read_msr_safe(MSR_AMD64_DC_CFG, &val))
+	if (rdmsrq_safe(MSR_AMD64_DC_CFG, &val))
 		return;
 
 	val |= (1ULL << 47);
 
-	native_write_msr_safe(MSR_AMD64_DC_CFG, val);
+	wrmsrq_safe(MSR_AMD64_DC_CFG, val);
 
 	erratum_383_found = true;
 }
@@ -554,9 +554,9 @@ static int svm_enable_virtualization_cpu(void)
 		u64 len, status = 0;
 		int err;
 
-		err = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &len);
+		err = rdmsrq_safe(MSR_AMD64_OSVW_ID_LENGTH, &len);
 		if (!err)
-			err = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, &status);
+			err = rdmsrq_safe(MSR_AMD64_OSVW_STATUS, &status);
 
 		if (err)
 			osvw_status = osvw_len = 0;
@@ -2029,7 +2029,7 @@ static bool is_erratum_383(void)
 	if (!erratum_383_found)
 		return false;
 
-	if (native_read_msr_safe(MSR_IA32_MC0_STATUS, &value))
+	if (rdmsrq_safe(MSR_IA32_MC0_STATUS, &value))
 		return false;
 
 	/* Bit 62 may or may not be set for this mce */
@@ -2040,11 +2040,11 @@
 
 	/* Clear MCi_STATUS registers */
 	for (i = 0; i < 6; ++i)
-		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0);
+		wrmsrq_safe(MSR_IA32_MCx_STATUS(i), 0);
 
-	if (!native_read_msr_safe(MSR_IA32_MCG_STATUS, &value)) {
+	if (!rdmsrq_safe(MSR_IA32_MCG_STATUS, &value)) {
 		value &= ~(1ULL << 2);
-		native_write_msr_safe(MSR_IA32_MCG_STATUS, value);
+		wrmsrq_safe(MSR_IA32_MCG_STATUS, value);
 	}
 
 	/* Flush tlb to evict multi-match entries */
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 8f89ce0b67e3..d49a3bdc448b 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -323,7 +323,7 @@ static u64 xen_amd_read_pmc(int counter)
 		u64 val;
 
 		msr = amd_counters_base + (counter * amd_msr_step);
-		native_read_msr_safe(msr, &val);
+		rdmsrq_safe(msr, &val);
 		return val;
 	}
 
@@ -349,7 +349,7 @@ static u64 xen_intel_read_pmc(int counter)
 		else
 			msr = MSR_IA32_PERFCTR0 + counter;
 
-		native_read_msr_safe(msr, &val);
+		rdmsrq_safe(msr, &val);
 		return val;
 	}
 
-- 
2.53.0
