Just following up on this series. Any feedback would be appreciated!

Thanks,
Lance

On Mon, Mar 16, 2026 at 10:36:30AM +0800, Lance Yang wrote:
>
>Gentle ping :)
>
>On Mon, Mar 09, 2026 at 10:07:11AM +0800, Lance Yang wrote:
>>From: Lance Yang <[email protected]>
>>
>>On x86, enable the optimization introduced in the previous patch.
>>
>>native_pv_tlb_init() checks whether native_flush_tlb_multi() is in
>>use: on CONFIG_PARAVIRT kernels it compares pv_ops.mmu.flush_tlb_multi
>>against it; on non-PARAVIRT kernels the native flush is always in use.
>>
>>It decides once at boot whether to enable the optimization: if the
>>native TLB flush is in use and INVLPGB is not supported, we know the
>>flush sends IPIs and the redundant sync can be skipped. The decision
>>is then fixed via a static key, as Peter suggested[1].
>>
>>PV backends (KVM, Xen, Hyper-V) typically provide their own flush
>>implementations and don't call native_flush_tlb_multi() directly, so
>>they cannot be trusted to provide the IPI guarantees we need.
>>
>>Two-step plan as David suggested[2]:
>>
>>Step 1 (this patch): Skip the redundant sync when we are 100% certain
>>the TLB flush sent IPIs. INVLPGB is excluded because, when it is
>>supported, we cannot guarantee that IPIs were sent; leaving it out
>>keeps things clean and simple.
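>>
>>For the generic side, the previous patch (not shown here) ends up
>>gating the sync on the new hook; roughly like the sketch below, which
>>assumes the existing mm/mmu_gather.c helpers (exact placement and
>>names may differ from the real diff):
>>
>>	static void tlb_remove_table_one(void *table)
>>	{
>>		/*
>>		 * If the arch guarantees the table flush already IPI'd
>>		 * all CPUs, concurrent lockless (GUP-fast style) walkers
>>		 * have been interrupted and the extra sync is redundant.
>>		 */
>>		if (!tlb_table_flush_implies_ipi_broadcast())
>>			tlb_remove_table_sync_one();
>>		__tlb_remove_table(table);
>>	}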
>>
>>Step 2 (future work): Send targeted IPIs only to CPUs actually doing
>>software/lockless page table walks, benefiting all architectures.
>>
>>Step 2 naturally applies only to setups where Step 1 does not, such
>>as x86 with INVLPGB, or arm64.
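>>
>>To illustrate that direction (entirely hypothetical names, not part
>>of this series): each CPU could mark itself in a cpumask around its
>>lockless walk, and the freeing side would then IPI only that set:
>>
>>	static struct cpumask lockless_walk_cpus;	/* hypothetical */
>>
>>	static inline void lockless_walk_begin(void)
>>	{
>>		cpumask_set_cpu(smp_processor_id(), &lockless_walk_cpus);
>>		smp_mb();	/* pairs with the read on the freeing side */
>>	}
>>
>>	static inline void lockless_walk_end(void)
>>	{
>>		smp_mb();
>>		cpumask_clear_cpu(smp_processor_id(), &lockless_walk_cpus);
>>	}
>>
>>	static void tlb_remove_table_sync_targeted(void)
>>	{
>>		/* IPI and wait only for CPUs doing lockless walks. */
>>		on_each_cpu_mask(&lockless_walk_cpus,
>>				 tlb_remove_table_smp_sync, NULL, true);
>>	}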
>>
>>[1] https://lore.kernel.org/linux-mm/[email protected]/
>>[2] https://lore.kernel.org/linux-mm/[email protected]/
>>
>>Suggested-by: Peter Zijlstra <[email protected]>
>>Suggested-by: David Hildenbrand (Arm) <[email protected]>
>>Signed-off-by: Lance Yang <[email protected]>
>>---
>> arch/x86/include/asm/tlb.h      | 17 ++++++++++++++++-
>> arch/x86/include/asm/tlbflush.h |  2 ++
>> arch/x86/kernel/smpboot.c       |  1 +
>> arch/x86/mm/tlb.c               | 15 +++++++++++++++
>> 4 files changed, 34 insertions(+), 1 deletion(-)
>>
>>diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
>>index 866ea78ba156..99de622d3856 100644
>>--- a/arch/x86/include/asm/tlb.h
>>+++ b/arch/x86/include/asm/tlb.h
>>@@ -5,11 +5,21 @@
>> #define tlb_flush tlb_flush
>> static inline void tlb_flush(struct mmu_gather *tlb);
>> 
>>+#define tlb_table_flush_implies_ipi_broadcast tlb_table_flush_implies_ipi_broadcast
>>+static inline bool tlb_table_flush_implies_ipi_broadcast(void);
>>+
>> #include <asm-generic/tlb.h>
>> #include <linux/kernel.h>
>> #include <vdso/bits.h>
>> #include <vdso/page.h>
>> 
>>+DECLARE_STATIC_KEY_FALSE(tlb_ipi_broadcast_key);
>>+
>>+static inline bool tlb_table_flush_implies_ipi_broadcast(void)
>>+{
>>+     return static_branch_likely(&tlb_ipi_broadcast_key);
>>+}
>>+
>> static inline void tlb_flush(struct mmu_gather *tlb)
>> {
>>      unsigned long start = 0UL, end = TLB_FLUSH_ALL;
>>@@ -20,7 +30,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
>>              end = tlb->end;
>>      }
>> 
>>-     flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
>>+     /*
>>+      * Pass both freed_tables and unshared_tables so that lazy-TLB CPUs
>>+      * also receive IPIs when unsharing page tables.
>>+      */
>>+     flush_tlb_mm_range(tlb->mm, start, end, stride_shift,
>>+                        tlb->freed_tables || tlb->unshared_tables);
>> }
>> 
>> static inline void invlpg(unsigned long addr)
>>diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
>>index 5a3cdc439e38..8ba853154b46 100644
>>--- a/arch/x86/include/asm/tlbflush.h
>>+++ b/arch/x86/include/asm/tlbflush.h
>>@@ -18,6 +18,8 @@
>> 
>> DECLARE_PER_CPU(u64, tlbstate_untag_mask);
>> 
>>+void __init native_pv_tlb_init(void);
>>+
>> void __flush_tlb_all(void);
>> 
>> #define TLB_FLUSH_ALL        -1UL
>>diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
>>index 5cd6950ab672..3cdb04162843 100644
>>--- a/arch/x86/kernel/smpboot.c
>>+++ b/arch/x86/kernel/smpboot.c
>>@@ -1167,6 +1167,7 @@ void __init native_smp_prepare_boot_cpu(void)
>>              switch_gdt_and_percpu_base(me);
>> 
>>      native_pv_lock_init();
>>+     native_pv_tlb_init();
>> }
>> 
>> void __init native_smp_cpus_done(unsigned int max_cpus)
>>diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
>>index 621e09d049cb..8f5585ebaf09 100644
>>--- a/arch/x86/mm/tlb.c
>>+++ b/arch/x86/mm/tlb.c
>>@@ -26,6 +26,8 @@
>> 
>> #include "mm_internal.h"
>> 
>>+DEFINE_STATIC_KEY_FALSE(tlb_ipi_broadcast_key);
>>+
>> #ifdef CONFIG_PARAVIRT
>> # define STATIC_NOPV
>> #else
>>@@ -1834,3 +1836,16 @@ static int __init create_tlb_single_page_flush_ceiling(void)
>>      return 0;
>> }
>> late_initcall(create_tlb_single_page_flush_ceiling);
>>+
>>+void __init native_pv_tlb_init(void)
>>+{
>>+#ifdef CONFIG_PARAVIRT
>>+     if (pv_ops.mmu.flush_tlb_multi != native_flush_tlb_multi)
>>+             return;
>>+#endif
>>+
>>+     if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
>>+             return;
>>+
>>+     static_branch_enable(&tlb_ipi_broadcast_key);
>>+}