The VSM code page offset register (HV_REGISTER_VSM_CODE_PAGE_OFFSETS)
is x86-specific; its value configures the static call used to return
to VTL0 via the hypercall page. Move the register read from the common
mshv_vtl_get_vsm_regs() into the x86 mshv_vtl_return_call_init(),
which is the sole consumer of the offset.

Change mshv_vtl_return_call_init() from taking a u64 parameter to
taking no arguments and returning an int, so a failure to read the
register can be propagated to the caller. Rename mshv_vtl_get_vsm_regs()
to mshv_vtl_get_vsm_cap_reg() since it now only fetches
HV_REGISTER_VSM_CAPABILITIES.

No functional change on x86. This prepares the common driver code for
ARM64 where VSM code page offsets do not apply.

Signed-off-by: Naman Jain <[email protected]>
---
 arch/x86/hyperv/hv_vtl.c        | 19 +++++++++++++++++--
 arch/x86/include/asm/mshyperv.h |  4 ++--
 drivers/hv/mshv_vtl_main.c      | 24 +++++++++++++-----------
 3 files changed, 32 insertions(+), 15 deletions(-)

diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
index f3ffb6a7cb2d..7c10b34cf8a4 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -293,10 +293,25 @@ EXPORT_SYMBOL_GPL(hv_vtl_configure_reg_page);
 
 DEFINE_STATIC_CALL_NULL(__mshv_vtl_return_hypercall, void (*)(void));
 
-void mshv_vtl_return_call_init(u64 vtl_return_offset)
+int mshv_vtl_return_call_init(void)
 {
+       struct hv_register_assoc vsm_pg_offset_reg;
+       union hv_register_vsm_page_offsets offsets;
+       int ret;
+
+       vsm_pg_offset_reg.name = HV_REGISTER_VSM_CODE_PAGE_OFFSETS;
+
+       ret = hv_call_get_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
+                                      1, input_vtl_zero, &vsm_pg_offset_reg);
+       if (ret)
+               return ret;
+
+       offsets.as_uint64 = vsm_pg_offset_reg.value.reg64;
+
        static_call_update(__mshv_vtl_return_hypercall,
-                          (void *)((u8 *)hv_hypercall_pg + vtl_return_offset));
+                          (void *)((u8 *)hv_hypercall_pg + offsets.vtl_return_offset));
+
+       return 0;
 }
 EXPORT_SYMBOL(mshv_vtl_return_call_init);
 
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index b4d80c9a673a..b48f115c1292 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -286,14 +286,14 @@ struct mshv_vtl_cpu_context {
 #ifdef CONFIG_HYPERV_VTL_MODE
 void __init hv_vtl_init_platform(void);
 int __init hv_vtl_early_init(void);
-void mshv_vtl_return_call_init(u64 vtl_return_offset);
+int mshv_vtl_return_call_init(void);
 void mshv_vtl_return_hypercall(void);
 void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0);
 int hv_vtl_get_set_reg(struct hv_register_assoc *regs, bool set, bool shared);
 #else
 static inline void __init hv_vtl_init_platform(void) {}
 static inline int __init hv_vtl_early_init(void) { return 0; }
-static inline void mshv_vtl_return_call_init(u64 vtl_return_offset) {}
+static inline int mshv_vtl_return_call_init(void) { return 0; }
 static inline void mshv_vtl_return_hypercall(void) {}
 static inline void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0) {}
 #endif
diff --git a/drivers/hv/mshv_vtl_main.c b/drivers/hv/mshv_vtl_main.c
index 4c9ae65ad3e8..be498c9234fd 100644
--- a/drivers/hv/mshv_vtl_main.c
+++ b/drivers/hv/mshv_vtl_main.c
@@ -79,7 +79,6 @@ struct mshv_vtl {
 };
 
 static struct mutex mshv_vtl_poll_file_lock;
-static union hv_register_vsm_page_offsets mshv_vsm_page_offsets;
 static union hv_register_vsm_capabilities mshv_vsm_capabilities;
 
 static DEFINE_PER_CPU(struct mshv_vtl_poll_file, mshv_vtl_poll_file);
@@ -203,21 +202,19 @@ static void mshv_vtl_synic_enable_regs(unsigned int cpu)
        /* VTL2 Host VSP SINT is (un)masked when the user mode requests that */
 }
 
-static int mshv_vtl_get_vsm_regs(void)
+static int mshv_vtl_get_vsm_cap_reg(void)
 {
-       struct hv_register_assoc registers[2];
-       int ret, count = 2;
+       struct hv_register_assoc vsm_capability_reg;
+       int ret;
 
-       registers[0].name = HV_REGISTER_VSM_CODE_PAGE_OFFSETS;
-       registers[1].name = HV_REGISTER_VSM_CAPABILITIES;
+       vsm_capability_reg.name = HV_REGISTER_VSM_CAPABILITIES;
 
        ret = hv_call_get_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
-                                      count, input_vtl_zero, registers);
+                                      1, input_vtl_zero, &vsm_capability_reg);
        if (ret)
                return ret;
 
-       mshv_vsm_page_offsets.as_uint64 = registers[0].value.reg64;
-       mshv_vsm_capabilities.as_uint64 = registers[1].value.reg64;
+       mshv_vsm_capabilities.as_uint64 = vsm_capability_reg.value.reg64;
 
        return ret;
 }
@@ -1139,13 +1136,18 @@ static int __init mshv_vtl_init(void)
        tasklet_init(&msg_dpc, mshv_vtl_sint_on_msg_dpc, 0);
        init_waitqueue_head(&fd_wait_queue);
 
-       if (mshv_vtl_get_vsm_regs()) {
+       if (mshv_vtl_get_vsm_cap_reg()) {
                dev_emerg(dev, "Unable to get VSM capabilities !!\n");
                ret = -ENODEV;
                goto free_dev;
        }
 
-       mshv_vtl_return_call_init(mshv_vsm_page_offsets.vtl_return_offset);
+       ret = mshv_vtl_return_call_init();
+       if (ret) {
+               dev_err(dev, "mshv_vtl_return_call_init failed: %d\n", ret);
+               goto free_dev;
+       }
+
        ret = hv_vtl_setup_synic();
        if (ret)
                goto free_dev;
-- 
2.43.0


Reply via email to