Use the CAST instruction to swap the guest descriptor when FEAT_LSUI
is enabled, avoiding the need to clear the PAN bit.

Signed-off-by: Yeoreum Yun <[email protected]>
---
 arch/arm64/kvm/at.c | 42 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 41 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index 885bd5bb2f41..e49d2742a3ab 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -9,6 +9,7 @@
 #include <asm/esr.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
+#include <asm/lsui.h>
 
 static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
 {
@@ -1704,6 +1705,43 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
        }
 }
 
+/*
+ * Atomically replace a guest stage-1 descriptor at the userspace address
+ * @ptep, but only if it still contains @old.
+ *
+ * Returns 0 on success, -EAGAIN if the descriptor no longer held @old
+ * (i.e. it changed under us), or a uaccess fault code from the extable
+ * handler.
+ */
+static int __lsui_swap_desc(u64 __user *ptep, u64 old, u64 new)
+{
+       u64 tmp = old;
+       int ret = 0;
+
+       /*
+        * FEAT_LSUI is supported since Armv9.6, where FEAT_PAN is mandatory.
+        * However, this assumption may not always hold:
+        *
+        *   - Some CPUs advertise FEAT_LSUI but lack FEAT_PAN.
+        *   - Virtualisation or ID register overrides may expose invalid
+        *     feature combinations.
+        *
+        * Rather than disabling FEAT_LSUI when FEAT_PAN is absent, wrap LSUI
+        * instructions with uaccess_ttbr0_enable()/disable() when
+        * ARM64_SW_TTBR0_PAN is enabled.
+        */
+       uaccess_ttbr0_enable();
+
+       /*
+        * Unprivileged compare-and-swap: CAST writes the value it observed
+        * at *ptep back into the [old] register, which the tmp comparison
+        * below relies on. On a fault, the extable entry sets [ret] and
+        * resumes execution at label 2.
+        *
+        * NOTE(review): CAST has no explicit acquire/release semantics;
+        * confirm this matches the ordering of the LSE and LL/SC fallback
+        * paths, or use the CASALT form instead.
+        */
+       asm volatile(__LSUI_PREAMBLE
+                    "1: cast   %[old], %[new], %[addr]\n"
+                    "2:\n"
+                    _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
+                    : [old] "+r" (old), [addr] "+Q" (*ptep), [ret] "+r" (ret)
+                    : [new] "r" (new)
+                    : "memory");
+
+       uaccess_ttbr0_disable();
+
+       if (ret)
+               return ret;
+
+       /* The CAS observed a different value: someone raced with us. */
+       if (tmp != old)
+               return -EAGAIN;
+
+       return 0;
+}
+
 static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
 {
        u64 tmp = old;
@@ -1779,7 +1817,9 @@ int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
                return -EPERM;
 
        ptep = (u64 __user *)hva + offset;
-       if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
+       if (cpus_have_final_cap(ARM64_HAS_LSUI))
+               r = __lsui_swap_desc(ptep, old, new);
+       else if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
                r = __lse_swap_desc(ptep, old, new);
        else
                r = __llsc_swap_desc(ptep, old, new);
-- 
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}


Reply via email to