The PXN (Privileged eXecute Never) bit can be set on level 1 page
table entries to prevent the pages covered by such an entry from
being executed at PL1 and above.  So by setting this bit on the
level 1 page table entries that cover the userland part of the
address space, we can prevent the kernel from executing user pages.
The result is comparable to SMEP on x86.
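For reference, here is a minimal sketch of how such an L1 coarse
descriptor is put together on ARMv7.  The helper below is illustrative
only, not code from the tree (domain bits are omitted for brevity);
the bit positions match the L1_C_V7_PXN and L1_S_V7_PXN defines added
to pte.h below: in a coarse (page table) descriptor PXN is bit 2, in
a section descriptor it is bit 0.

	#include <stdint.h>

	#define L1_TYPE_C	0x00000001	/* coarse page table descriptor */
	#define L1_C_PXN	0x00000004	/* PXN is bit 2 in a coarse descriptor */

	/*
	 * Build a coarse L1 descriptor for a userland mapping: bits
	 * [31:10] hold the physical address of the L2 table, and the
	 * PXN bit makes everything the entry covers non-executable
	 * at PL1.
	 */
	static inline uint32_t
	l1_coarse_user(uint32_t l2pa)
	{
		return (l2pa & 0xfffffc00) | L1_C_PXN | L1_TYPE_C;
	}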

This diff relies on the fact that all level 1 entries for userland
are created by pmap_enter(), whereas entries for the kernel are
created by pmap_growkernel().  Setting the bit in pmap_enter() alone
therefore covers exactly the userland mappings; see the sketch below.
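To make the split concrete, the two code paths end up building their
L1 entries like this.  Only the pmap_enter() line is real code from
the diff; the pmap_growkernel() line is paraphrased for illustration:

	/* pmap_enter(): userland L1 entries get the PXN bit.  l1_c_pxn
	 * is L1_C_V7_PXN when the CPU supports it, 0 otherwise. */
	l1pd = L1_C_PROTO | l2b->l2b_phys | l1_c_pxn;

	/* pmap_growkernel(): kernel L1 entries never include l1_c_pxn,
	 * so kernel text remains executable at PL1. */
	l1pd = L1_C_PROTO | l2b->l2b_phys;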

ok?


Index: arch/arm/arm/pmap7.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/arm/pmap7.c,v
retrieving revision 1.49
diff -u -p -r1.49 pmap7.c
--- arch/arm/arm/pmap7.c        26 Aug 2016 16:02:33 -0000      1.49
+++ arch/arm/arm/pmap7.c        26 Aug 2016 19:24:39 -0000
@@ -303,6 +303,11 @@ struct l1_ttable {
 #define        L1_IDX(va)              (((vaddr_t)(va)) >> L1_S_SHIFT)
 
 /*
+ * Set if the PXN bit is supported.
+ */
+pd_entry_t l1_c_pxn;
+
+/*
  * A list of all L1 tables
  */
 TAILQ_HEAD(, l1_ttable) l1_list;
@@ -1267,7 +1272,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
                        pd_entry_t *pl1pd, l1pd;
 
                        pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)];
-                       l1pd = L1_C_PROTO | l2b->l2b_phys;
+                       l1pd = L1_C_PROTO | l2b->l2b_phys | l1_c_pxn;
                        if (*pl1pd != l1pd) {
                                *pl1pd = l1pd;
                                PTE_SYNC(pl1pd);
@@ -2861,7 +2866,7 @@ void              (*pmap_zero_page_func)(struct vm_p
 void
 pmap_pte_init_armv7(void)
 {
-       uint32_t id_mmfr3;
+       uint32_t id_mmfr0, id_mmfr3;
 
        /*
         * XXX We want to use proper TEX settings eventually.
@@ -2909,6 +2914,11 @@ pmap_pte_init_armv7(void)
 
        pmap_copy_page_func = pmap_copy_page_generic;
        pmap_zero_page_func = pmap_zero_page_generic;
+
+       /* Check if the PXN bit is supported. */
+       __asm volatile("mrc p15, 0, %0, c0, c1, 4" : "=r"(id_mmfr0));
+       if ((id_mmfr0 & ID_MMFR0_VMSA_MASK) >= VMSA_V7_PXN)
+               l1_c_pxn = L1_C_V7_PXN;
 
        /* Check for coherent walk. */
        __asm volatile("mrc p15, 0, %0, c0, c1, 7" : "=r"(id_mmfr3));
Index: arch/arm/include/pte.h
===================================================================
RCS file: /cvs/src/sys/arch/arm/include/pte.h,v
retrieving revision 1.7
diff -u -p -r1.7 pte.h
--- arch/arm/include/pte.h      18 Aug 2016 09:28:22 -0000      1.7
+++ arch/arm/include/pte.h      26 Aug 2016 19:24:39 -0000
@@ -159,6 +159,7 @@ typedef uint32_t    pt_entry_t;     /* L2 table
 #define        L1_S_V7_AF      0x00000400      /* Access Flag */
 #define        L1_S_V7_IMP     0x00000200      /* implementation defined */
 #define        L1_S_V7_XN      0x00000010      /* eXecute Never */
+#define        L1_S_V7_PXN     0x00000001      /* Privileged eXecute Never */
 
 /* L1 Coarse Descriptor */
 #define        L1_C_IMP0       0x00000004      /* implementation defined */
@@ -170,8 +171,9 @@ typedef uint32_t    pt_entry_t;     /* L2 table
 
 #define	L1_C_XSCALE_P	0x00000200	/* ECC enable for this section */
 
-#define        L1_C_V7_NS      0x00000008      /* Non-secure */
 #define        L1_C_V7_IMP     0x00000200      /* implementation defined */
+#define        L1_C_V7_NS      0x00000008      /* Non-secure */
+#define        L1_C_V7_PXN     0x00000004      /* Privileged eXecute Never */
 
 /* L1 Fine Descriptor */
 #define        L1_F_IMP0       0x00000004      /* implementation defined */
