[PATCH v2 8/8] x86/module: enable ROX caches for module text

2024-08-25 Thread Mike Rapoport
From: "Mike Rapoport (Microsoft)" 

Enable execmem's cache of PMD_SIZE'ed pages mapped as ROX for module
text allocations.
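
A rough consumer-side sketch (not part of this patch; insns and len are
placeholders): text handed out from the ROX cache is already mapped
read-only+execute, so the module loader must go through the text poking
machinery instead of writing to the allocation directly:

	/* hypothetical: ROX text cannot be written via its own mapping */
	void *text = execmem_alloc(EXECMEM_MODULE_TEXT, len);

	if (text)
		text_poke_copy(text, insns, len);	/* not memcpy() */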

Signed-off-by: Mike Rapoport (Microsoft) 
---
 arch/x86/mm/init.c | 26 +-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index eb503f53c319..a0ec99fb9385 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -1053,6 +1053,15 @@ unsigned long arch_max_swapfile_size(void)
 #ifdef CONFIG_EXECMEM
 static struct execmem_info execmem_info __ro_after_init;
 
+static void execmem_fill_trapping_insns(void *ptr, size_t size, bool writeable)
+{
+	/* fill memory with INT3 instructions */
+	if (writeable)
+		memset(ptr, INT3_INSN_OPCODE, size);
+	else
+		text_poke_set(ptr, INT3_INSN_OPCODE, size);
+}
+
 struct execmem_info __init *execmem_arch_setup(void)
 {
 	unsigned long start, offset = 0;
@@ -1063,8 +1072,23 @@ struct execmem_info __init *execmem_arch_setup(void)
 	start = MODULES_VADDR + offset;
 
 	execmem_info = (struct execmem_info){
+		.fill_trapping_insns = execmem_fill_trapping_insns,
 		.ranges = {
-			[EXECMEM_DEFAULT] = {
+			[EXECMEM_MODULE_TEXT] = {
+				.flags	= EXECMEM_KASAN_SHADOW | EXECMEM_ROX_CACHE,
+				.start	= start,
+				.end	= MODULES_END,
+				.pgprot	= PAGE_KERNEL_ROX,
+				.alignment = MODULE_ALIGN,
+			},
+			[EXECMEM_KPROBES ... EXECMEM_BPF] = {
+				.flags	= EXECMEM_KASAN_SHADOW,
+				.start	= start,
+				.end	= MODULES_END,
+				.pgprot	= PAGE_KERNEL,
+				.alignment = MODULE_ALIGN,
+			},
+			[EXECMEM_MODULE_DATA] = {
 				.flags	= EXECMEM_KASAN_SHADOW,
 				.start	= start,
 				.end	= MODULES_END,
-- 
2.43.0



[PATCH v2 7/8] execmem: add support for cache of large ROX pages

2024-08-25 Thread Mike Rapoport
From: "Mike Rapoport (Microsoft)" 

Using large pages to map text areas reduces iTLB pressure and improves
performance.

Extend execmem_alloc() with the ability to use PMD_SIZE'ed pages with ROX
permissions as a cache for smaller allocations.

To populate the cache, a writable large page is allocated from vmalloc with
VM_ALLOW_HUGE_VMAP, filled with invalid instructions and then remapped as
ROX.
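
A condensed sketch of that populate sequence, assuming hypothetical p,
size, start and end (the actual patch threads these through its vmalloc
helper and adds error handling):

	/* writable scratch mapping, PMD-aligned so it can use huge pages */
	p = __vmalloc_node_range(size, PMD_SIZE, start, end,
				 GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL,
				 VM_ALLOW_HUGE_VMAP, NUMA_NO_NODE,
				 __builtin_return_address(0));

	/* fill with instructions that will trap */
	execmem_fill_trapping_insns(p, size, /* writable = */ true);

	/* then flip the whole area to read-only + execute */
	set_memory_rox((unsigned long)p, size >> PAGE_SHIFT);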

Portions of that large page are handed out to execmem_alloc() callers
without any changes to the permissions.

When the memory is freed with execmem_free(), it is invalidated again so
that it won't contain stale instructions.
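
Because the area stays ROX while it sits in the cache, this invalidation
cannot be a plain memset(); it goes through the same fill_trapping_insns
hook with writable == false, which on x86 ends up in text_poke_set()
(see patch 8/8):

	/* the mapping is still read-only, so poke the trapping insns in */
	execmem_fill_trapping_insns(ptr, size, /* writable = */ false);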

The cache is enabled when an architecture sets the EXECMEM_ROX_CACHE flag
in the definition of an execmem_range.
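
For example, x86 (patch 8/8 of this series) opts module text in with:

	[EXECMEM_MODULE_TEXT] = {
		.flags	= EXECMEM_KASAN_SHADOW | EXECMEM_ROX_CACHE,
		.start	= start,
		.end	= MODULES_END,
		.pgprot	= PAGE_KERNEL_ROX,
		.alignment = MODULE_ALIGN,
	},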

Signed-off-by: Mike Rapoport (Microsoft) 
---
 include/linux/execmem.h |   2 +
 mm/execmem.c            | 290 +++-
 2 files changed, 287 insertions(+), 5 deletions(-)

diff --git a/include/linux/execmem.h b/include/linux/execmem.h
index dfdf19f8a5e8..7436aa547818 100644
--- a/include/linux/execmem.h
+++ b/include/linux/execmem.h
@@ -77,12 +77,14 @@ struct execmem_range {
 
 /**
  * struct execmem_info - architecture parameters for code allocations
+ * @fill_trapping_insns: set memory to contain instructions that will trap
  * @ranges: array of parameter sets defining architecture specific
  * parameters for executable memory allocations. The ranges that are not
  * explicitly initialized by an architecture use parameters defined for
  * @EXECMEM_DEFAULT.
  */
 struct execmem_info {
+	void (*fill_trapping_insns)(void *ptr, size_t size, bool writable);
 	struct execmem_range	ranges[EXECMEM_TYPE_MAX];
 };
 
diff --git a/mm/execmem.c b/mm/execmem.c
index 0f6691e9ffe6..3bde0863c50a 100644
--- a/mm/execmem.c
+++ b/mm/execmem.c
@@ -7,28 +7,88 @@
  */
 
 #include <linux/mm.h>
+#include <linux/mutex.h>
 #include <linux/vmalloc.h>
 #include <linux/execmem.h>
+#include <linux/maple_tree.h>
 #include <linux/set_memory.h>
 #include <linux/moduleloader.h>
 
+#include <asm/tlbflush.h>
+
+#include "internal.h"
+
 static struct execmem_info *execmem_info __ro_after_init;
 static struct execmem_info default_execmem_info __ro_after_init;
 
-static void *__execmem_alloc(struct execmem_range *range, size_t size)
+#ifdef CONFIG_MMU
+struct execmem_cache {
+	struct mutex mutex;
+	struct maple_tree busy_areas;
+	struct maple_tree free_areas;
+};
+
+static struct execmem_cache execmem_cache = {
+	.mutex = __MUTEX_INITIALIZER(execmem_cache.mutex),
+	.busy_areas = MTREE_INIT_EXT(busy_areas, MT_FLAGS_LOCK_EXTERN,
+				     execmem_cache.mutex),
+	.free_areas = MTREE_INIT_EXT(free_areas, MT_FLAGS_LOCK_EXTERN,
+				     execmem_cache.mutex),
+};
+
+static void execmem_cache_clean(struct work_struct *work)
+{
+	struct maple_tree *free_areas = &execmem_cache.free_areas;
+	struct mutex *mutex = &execmem_cache.mutex;
+	MA_STATE(mas, free_areas, 0, ULONG_MAX);
+	void *area;
+
+	mutex_lock(mutex);
+	mas_for_each(&mas, area, ULONG_MAX) {
+		size_t size;
+
+		if (!xa_is_value(area))
+			continue;
+
+		size = xa_to_value(area);
+
+		if (IS_ALIGNED(size, PMD_SIZE) &&
+		    IS_ALIGNED(mas.index, PMD_SIZE)) {
+			void *ptr = (void *)mas.index;
+
+			mas_erase(&mas);
+			vfree(ptr);
+		}
+	}
+	mutex_unlock(mutex);
+}
+
+static DECLARE_WORK(execmem_cache_clean_work, execmem_cache_clean);
+
+static void execmem_fill_trapping_insns(void *ptr, size_t size, bool writable)
+{
+	if (execmem_info->fill_trapping_insns)
+		execmem_info->fill_trapping_insns(ptr, size, writable);
+	else
+		memset(ptr, 0, size);
+}
+
+static void *execmem_vmalloc(struct execmem_range *range, size_t size,
+			     pgprot_t pgprot, unsigned long vm_flags)
 {
 	bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
-	unsigned long vm_flags  = VM_FLUSH_RESET_PERMS;
 	gfp_t gfp_flags = GFP_KERNEL | __GFP_NOWARN;
+	unsigned int align = range->alignment;
 	unsigned long start = range->start;
 	unsigned long end = range->end;
-	unsigned int align = range->alignment;
-	pgprot_t pgprot = range->pgprot;
 	void *p;
 
 	if (kasan)
 		vm_flags |= VM_DEFER_KMEMLEAK;
 
+	if (vm_flags & VM_ALLOW_HUGE_VMAP)
+		align = PMD_SIZE;
+
 	p = __vmalloc_node_range(size, align, start, end, gfp_flags,
 				 pgprot, vm_flags, NUMA_NO_NODE,
 				 __builtin_return_address(0));
@@ -50,8 +110,226 @@ static void *__execmem_alloc(struct execmem_range *range, size_t size)
 		return NULL;
 	}
 
+	return p;
+}
+
+static int execmem_cache_add(void *ptr, size_t size)
+{
+	struct maple_tree *free_areas = &execmem_cache.free_areas;
+	struct mutex *mutex = &ex