This part of the patch adds a tunable to
/sys/kernel/mm/transparent_hugepage called threshold.  The threshold
determines how many base pages a user must fault in from a single node
before a temporary compound page is promoted to a THP.  It defaults to 1,
which preserves the current behavior of handing out a THP on the first
fault, and accepts values from 1 to HPAGE_PMD_NR.
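
As an example of tuning it from userspace (illustration only: the path is
the sysfs file added by this patch, while the program itself and the value
64 are arbitrary):

  #include <stdio.h>
  #include <stdlib.h>

  int main(int argc, char **argv)
  {
          /* sysfs file added by this patch */
          const char *path = "/sys/kernel/mm/transparent_hugepage/threshold";
          /* 64 is just an example; threshold_store() accepts 1..HPAGE_PMD_NR */
          int value = argc > 1 ? atoi(argv[1]) : 64;
          FILE *f = fopen(path, "w");

          if (!f) {
                  perror("fopen");
                  return 1;
          }
          fprintf(f, "%d\n", value);
          if (fclose(f)) {
                  perror("fclose");
                  return 1;
          }
          return 0;
  }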

Signed-off-by: Alex Thorlton <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Nate Zimmer <[email protected]>
Cc: Cliff Wickman <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Wanpeng Li <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michel Lespinasse <[email protected]>
Cc: Benjamin LaHaise <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Al Viro <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Zhang Yanfei <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Jiang Liu <[email protected]>
Cc: Cody P Schafer <[email protected]>
Cc: Glauber Costa <[email protected]>
Cc: Kamezawa Hiroyuki <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: [email protected]
Cc: [email protected]

---
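
Note for reviewers: the fault-path consumer of this tunable is not part of
this hunk, so the following is only a self-contained model (plain C, not
kernel code) of the decision the threshold is meant to drive;
should_promote(), faults_on_node and the other names are invented for the
illustration.

  #include <stdio.h>

  #define HPAGE_PMD_NR 512        /* base pages per THP with 2MB huge pages */

  static int thp_threshold = 1;   /* mirrors transparent_hugepage_threshold */

  /*
   * faults_on_node: how many base pages of the temporary compound page
   * have been faulted in from one node so far.  Returns nonzero once the
   * region should be promoted to a real THP.
   */
  static int should_promote(int faults_on_node)
  {
          return faults_on_node >= thp_threshold;
  }

  int main(void)
  {
          thp_threshold = 64;     /* e.g. after writing 64 to the tunable */

          for (int faults = 1; faults <= HPAGE_PMD_NR; faults++) {
                  if (should_promote(faults)) {
                          printf("promote to THP after %d faults on one node\n",
                                 faults);
                          break;
                  }
          }
          return 0;
  }
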
 include/linux/huge_mm.h  |  2 ++
 include/linux/mm_types.h |  1 +
 mm/huge_memory.c         | 30 ++++++++++++++++++++++++++++++
 3 files changed, 33 insertions(+)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 3935428..0943b1b6 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -177,6 +177,8 @@ static inline struct page *compound_trans_head(struct page *page)
 extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long addr, pmd_t pmd, pmd_t *pmdp);
 
+extern int thp_threshold_check(void);
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d9851ee..b5efa23 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -408,6 +408,7 @@ struct mm_struct {
 #endif
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        pgtable_t pmd_huge_pte; /* protected by page_table_lock */
+       int thp_threshold;
 #endif
 #ifdef CONFIG_CPUMASK_OFFSTACK
        struct cpumask cpumask_allocation;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cca80d9..5d388e4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -44,6 +44,9 @@ unsigned long transparent_hugepage_flags __read_mostly =
        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
        (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 
+/* default to 1 page threshold for handing out thps; maintains old behavior */
+static int transparent_hugepage_threshold = 1;
+
 /* default scan 8*512 pte (or vmas) every 30 second */
 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
 static unsigned int khugepaged_pages_collapsed;
@@ -237,6 +240,11 @@ static struct shrinker huge_zero_page_shrinker = {
        .seeks = DEFAULT_SEEKS,
 };
 
+int thp_threshold_check(void)
+{
+       return transparent_hugepage_threshold;
+}
+
 #ifdef CONFIG_SYSFS
 
 static ssize_t double_flag_show(struct kobject *kobj,
@@ -376,6 +384,27 @@ static ssize_t use_zero_page_store(struct kobject *kobj,
 }
 static struct kobj_attribute use_zero_page_attr =
        __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
+static ssize_t threshold_show(struct kobject *kobj,
+                             struct kobj_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%d\n", transparent_hugepage_threshold);
+}
+static ssize_t threshold_store(struct kobject *kobj,
+                              struct kobj_attribute *attr,
+                              const char *buf, size_t count)
+{
+       int err, value;
+
+       err = kstrtoint(buf, 10, &value);
+       if (err || value < 1 || value > HPAGE_PMD_NR)
+               return -EINVAL;
+
+       transparent_hugepage_threshold = value;
+
+       return count;
+}
+static struct kobj_attribute threshold_attr =
+       __ATTR(threshold, 0644, threshold_show, threshold_store);
 #ifdef CONFIG_DEBUG_VM
 static ssize_t debug_cow_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
@@ -397,6 +426,7 @@ static struct kobj_attribute debug_cow_attr =
 static struct attribute *hugepage_attr[] = {
        &enabled_attr.attr,
        &defrag_attr.attr,
+       &threshold_attr.attr,
        &use_zero_page_attr.attr,
 #ifdef CONFIG_DEBUG_VM
        &debug_cow_attr.attr,
-- 
1.7.12.4

