Signed-off-by: Andrew Cooper <[email protected]>
---
CC: Jan Beulich <[email protected]>
CC: Roger Pau MonnĂ© <[email protected]>
CC: Wei Liu <[email protected]>
---
 xen/arch/x86/cpu/vpmu_amd.c             | 16 ++++++++--------
 xen/arch/x86/cpu/vpmu_intel.c           | 16 ++++++++--------
 xen/arch/x86/oprofile/op_model_athlon.c | 16 ++++++++--------
 xen/arch/x86/oprofile/op_model_p4.c     | 14 +++++++-------
 xen/arch/x86/oprofile/op_model_ppro.c   | 26 ++++++++++++++------------
 5 files changed, 45 insertions(+), 43 deletions(-)
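
For reference only (not part of the patch proper; text between the '---' and
the first diff header is discarded when the patch is applied): these hooks are
only ever reached through function-pointer tables, so under CET-IBT they need
an ENDBR landing pad, which is presumably what the cf_check annotation
requests.  A minimal standalone sketch of the pattern, assuming the toolchain
offers a cf_check function attribute (paired with -fcf-protection=branch and
-mmanual-endbr) and compiling the annotation away otherwise; the names below
are illustrative, not Xen's:

    #include <stdbool.h>

    /* Assumption: cf_check maps onto the compiler's control-flow check
     * attribute when available, and expands to nothing otherwise. */
    #ifdef __has_attribute
    # if __has_attribute(cf_check)
    #  define cf_check __attribute__((cf_check))
    # endif
    #endif
    #ifndef cf_check
    # define cf_check
    #endif

    struct vcpu;

    /* Reached only by indirect call through an ops table, so it needs an
     * ENDBR at its entry point when IBT is active, hence cf_check. */
    static int cf_check example_vpmu_save(struct vcpu *v, bool to_guest)
    {
        (void)v;
        (void)to_guest;
        return 0;
    }

    /* Hypothetical ops table standing in for a structure of hooks. */
    static const struct {
        int (*save)(struct vcpu *v, bool to_guest);
    } example_ops = { .save = example_vpmu_save };
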

diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 38972089ab42..a5487143f77e 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -186,7 +186,7 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
     msr_bitmap_off(vpmu);
 }
 
-static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
+static int cf_check amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     return 1;
 }
@@ -206,7 +206,7 @@ static inline void context_load(struct vcpu *v)
     }
 }
 
-static int amd_vpmu_load(struct vcpu *v, bool_t from_guest)
+static int cf_check amd_vpmu_load(struct vcpu *v, bool from_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct xen_pmu_amd_ctxt *ctxt;
@@ -280,7 +280,7 @@ static inline void context_save(struct vcpu *v)
         rdmsrl(counters[i], counter_regs[i]);
 }
 
-static int amd_vpmu_save(struct vcpu *v,  bool_t to_guest)
+static int cf_check amd_vpmu_save(struct vcpu *v,  bool to_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     unsigned int i;
@@ -348,8 +348,8 @@ static void context_update(unsigned int msr, u64 msr_content)
     }
 }
 
-static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
-                             uint64_t supported)
+static int cf_check amd_vpmu_do_wrmsr(
+    unsigned int msr, uint64_t msr_content, uint64_t supported)
 {
     struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
@@ -407,7 +407,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
     return 0;
 }
 
-static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+static int cf_check amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
     struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
@@ -425,7 +425,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
     return 0;
 }
 
-static void amd_vpmu_destroy(struct vcpu *v)
+static void cf_check amd_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -443,7 +443,7 @@ static void amd_vpmu_destroy(struct vcpu *v)
 }
 
 /* VPMU part of the 'q' keyhandler */
-static void amd_vpmu_dump(const struct vcpu *v)
+static void cf_check amd_vpmu_dump(const struct vcpu *v)
 {
     const struct vpmu_struct *vpmu = vcpu_vpmu(v);
     const struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index f59cae543868..8b450ec8a8aa 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -288,7 +288,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
 }
 
-static int core2_vpmu_save(struct vcpu *v, bool_t to_guest)
+static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -407,7 +407,7 @@ static int core2_vpmu_verify(struct vcpu *v)
     return 0;
 }
 
-static int core2_vpmu_load(struct vcpu *v, bool_t from_guest)
+static int cf_check core2_vpmu_load(struct vcpu *v, bool from_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -522,8 +522,8 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
     return 1;
 }
 
-static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
-                               uint64_t supported)
+static int cf_check core2_vpmu_do_wrmsr(
+    unsigned int msr, uint64_t msr_content, uint64_t supported)
 {
     int i, tmp;
     int type = -1, index = -1;
@@ -693,7 +693,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
     return 0;
 }
 
-static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+static int cf_check core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
     int type = -1, index = -1;
     struct vcpu *v = current;
@@ -733,7 +733,7 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 }
 
 /* Dump vpmu info on console, called in the context of keyhandler 'q'. */
-static void core2_vpmu_dump(const struct vcpu *v)
+static void cf_check core2_vpmu_dump(const struct vcpu *v)
 {
     const struct vpmu_struct *vpmu = vcpu_vpmu(v);
     unsigned int i;
@@ -778,7 +778,7 @@ static void core2_vpmu_dump(const struct vcpu *v)
     }
 }
 
-static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
+static int cf_check core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     u64 msr_content;
@@ -805,7 +805,7 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
     return 1;
 }
 
-static void core2_vpmu_destroy(struct vcpu *v)
+static void cf_check core2_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
diff --git a/xen/arch/x86/oprofile/op_model_athlon.c b/xen/arch/x86/oprofile/op_model_athlon.c
index 2177f02946e2..7bc5853a6c23 100644
--- a/xen/arch/x86/oprofile/op_model_athlon.c
+++ b/xen/arch/x86/oprofile/op_model_athlon.c
@@ -164,7 +164,7 @@ static inline u64 op_amd_randomize_ibs_op(u64 val)
     return val;
 }
 
-static void athlon_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check athlon_fill_in_addresses(struct op_msrs * const msrs)
 {
        msrs->counters[0].addr = MSR_K7_PERFCTR0;
        msrs->counters[1].addr = MSR_K7_PERFCTR1;
@@ -177,7 +177,7 @@ static void athlon_fill_in_addresses(struct op_msrs * const msrs)
        msrs->controls[3].addr = MSR_K7_EVNTSEL3;
 }
 
-static void fam15h_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check fam15h_fill_in_addresses(struct op_msrs * const msrs)
 {
        msrs->counters[0].addr = MSR_AMD_FAM15H_PERFCTR0;
        msrs->counters[1].addr = MSR_AMD_FAM15H_PERFCTR1;
@@ -194,7 +194,7 @@ static void fam15h_fill_in_addresses(struct op_msrs * const msrs)
        msrs->controls[5].addr = MSR_AMD_FAM15H_EVNTSEL5;
 }
 
-static void athlon_setup_ctrs(struct op_msrs const * const msrs)
+static void cf_check athlon_setup_ctrs(struct op_msrs const * const msrs)
 {
        uint64_t msr_content;
        int i;
@@ -308,9 +308,9 @@ static inline int handle_ibs(int mode, struct cpu_user_regs const * const regs)
     return 1;
 }
 
-static int athlon_check_ctrs(unsigned int const cpu,
-                            struct op_msrs const * const msrs,
-                            struct cpu_user_regs const * const regs)
+static int cf_check athlon_check_ctrs(
+       unsigned int const cpu, struct op_msrs const * const msrs,
+       struct cpu_user_regs const * const regs)
 
 {
        uint64_t msr_content;
@@ -386,7 +386,7 @@ static inline void start_ibs(void)
        }
 }
  
-static void athlon_start(struct op_msrs const * const msrs)
+static void cf_check athlon_start(struct op_msrs const * const msrs)
 {
        uint64_t msr_content;
        int i;
@@ -415,7 +415,7 @@ static void stop_ibs(void)
                wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
-static void athlon_stop(struct op_msrs const * const msrs)
+static void cf_check athlon_stop(struct op_msrs const * const msrs)
 {
        uint64_t msr_content;
        int i;
diff --git a/xen/arch/x86/oprofile/op_model_p4.c b/xen/arch/x86/oprofile/op_model_p4.c
index b08ba53cbd39..d047258644db 100644
--- a/xen/arch/x86/oprofile/op_model_p4.c
+++ b/xen/arch/x86/oprofile/op_model_p4.c
@@ -390,7 +390,7 @@ static unsigned int get_stagger(void)
 static unsigned long reset_value[NUM_COUNTERS_NON_HT];
 
 
-static void p4_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check p4_fill_in_addresses(struct op_msrs * const msrs)
 {
        unsigned int i;
        unsigned int addr, stag;
@@ -530,7 +530,7 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
 }
 
 
-static void p4_setup_ctrs(struct op_msrs const * const msrs)
+static void cf_check p4_setup_ctrs(struct op_msrs const * const msrs)
 {
        unsigned int i;
        uint64_t msr_content;
@@ -609,9 +609,9 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
        }
 }
 
-static int p4_check_ctrs(unsigned int const cpu,
-                         struct op_msrs const * const msrs,
-                         struct cpu_user_regs const * const regs)
+static int cf_check p4_check_ctrs(
+       unsigned int const cpu, struct op_msrs const * const msrs,
+       struct cpu_user_regs const * const regs)
 {
        unsigned long ctr, stag, real;
        uint64_t msr_content;
@@ -665,7 +665,7 @@ static int p4_check_ctrs(unsigned int const cpu,
 }
 
 
-static void p4_start(struct op_msrs const * const msrs)
+static void cf_check p4_start(struct op_msrs const * const msrs)
 {
        unsigned int stag;
        uint64_t msr_content;
@@ -683,7 +683,7 @@ static void p4_start(struct op_msrs const * const msrs)
 }
 
 
-static void p4_stop(struct op_msrs const * const msrs)
+static void cf_check p4_stop(struct op_msrs const * const msrs)
 {
        unsigned int stag;
        uint64_t msr_content;
diff --git a/xen/arch/x86/oprofile/op_model_ppro.c b/xen/arch/x86/oprofile/op_model_ppro.c
index 72c504a10216..8d7e13ea8777 100644
--- a/xen/arch/x86/oprofile/op_model_ppro.c
+++ b/xen/arch/x86/oprofile/op_model_ppro.c
@@ -63,7 +63,7 @@ static int counter_width = 32;
 static unsigned long reset_value[OP_MAX_COUNTER];
 int ppro_has_global_ctrl = 0;
 
-static void ppro_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
        int i;
 
@@ -74,7 +74,7 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 }
 
 
-static void ppro_setup_ctrs(struct op_msrs const * const msrs)
+static void cf_check ppro_setup_ctrs(struct op_msrs const * const msrs)
 {
        uint64_t msr_content;
        int i;
@@ -128,9 +128,9 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
        }
 }
 
-static int ppro_check_ctrs(unsigned int const cpu,
-                           struct op_msrs const * const msrs,
-                           struct cpu_user_regs const * const regs)
+static int cf_check ppro_check_ctrs(
+       unsigned int const cpu, struct op_msrs const * const msrs,
+       struct cpu_user_regs const * const regs)
 {
        u64 val;
        int i;
@@ -170,7 +170,7 @@ static int ppro_check_ctrs(unsigned int const cpu,
 }
 
 
-static void ppro_start(struct op_msrs const * const msrs)
+static void cf_check ppro_start(struct op_msrs const * const msrs)
 {
        uint64_t msr_content;
        int i;
@@ -190,7 +190,7 @@ static void ppro_start(struct op_msrs const * const msrs)
 }
 
 
-static void ppro_stop(struct op_msrs const * const msrs)
+static void cf_check ppro_stop(struct op_msrs const * const msrs)
 {
        uint64_t msr_content;
        int i;
@@ -206,7 +206,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0ULL);
 }
 
-static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
+static int cf_check ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
 {
        if ( (msr_index >= MSR_IA32_PERFCTR0) &&
             (msr_index < (MSR_IA32_PERFCTR0 + num_counters)) )
@@ -226,7 +226,7 @@ static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
         return 0;
 }
 
-static int ppro_allocate_msr(struct vcpu *v)
+static int cf_check ppro_allocate_msr(struct vcpu *v)
 {
        struct vpmu_struct *vpmu = vcpu_vpmu(v);
        struct arch_msr_pair *msr_content;
@@ -245,7 +245,7 @@ static int ppro_allocate_msr(struct vcpu *v)
        return 0;
 }
 
-static void ppro_free_msr(struct vcpu *v)
+static void cf_check ppro_free_msr(struct vcpu *v)
 {
        struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -255,7 +255,8 @@ static void ppro_free_msr(struct vcpu *v)
        vpmu_reset(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED);
 }
 
-static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
+static void cf_check ppro_load_msr(
+       struct vcpu *v, int type, int index, u64 *msr_content)
 {
        struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
        switch ( type )
@@ -269,7 +269,8 @@ static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
        }
 }
 
-static void ppro_save_msr(struct vcpu *v, int type, int index, u64 msr_content)
+static void cf_check ppro_save_msr(
+       struct vcpu *v, int type, int index, u64 msr_content)
 {
        struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
 
-- 
2.11.0

