Constrain core users of nodemasks to default_sysram_nodes, which is
guaranteed to either be NULL or contain the set of nodes with sysram
memory blocks.

Signed-off-by: Gregory Price <[email protected]>
---
 mm/oom_kill.c   |  5 ++++-
 mm/page_alloc.c | 12 ++++++++----
 mm/slub.c       |  4 +++-
 3 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c145b0feecc1..e0b6137835b2 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -34,6 +34,7 @@
 #include <linux/export.h>
 #include <linux/notifier.h>
 #include <linux/memcontrol.h>
+#include <linux/memory-tiers.h>
 #include <linux/mempolicy.h>
 #include <linux/security.h>
 #include <linux/ptrace.h>
@@ -1118,6 +1119,8 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
 bool out_of_memory(struct oom_control *oc)
 {
        unsigned long freed = 0;
+       if (!oc->nodemask)
+               oc->nodemask = default_sysram_nodes;
 
        if (oom_killer_disabled)
                return false;
@@ -1154,7 +1157,7 @@ bool out_of_memory(struct oom_control *oc)
         */
        oc->constraint = constrained_alloc(oc);
        if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
-               oc->nodemask = NULL;
+               oc->nodemask = default_sysram_nodes;
        check_panic_on_oom(oc);
 
        if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fd5401fb5e00..18213eacf974 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -34,6 +34,7 @@
 #include <linux/cpuset.h>
 #include <linux/pagevec.h>
 #include <linux/memory_hotplug.h>
+#include <linux/memory-tiers.h>
 #include <linux/nodemask.h>
 #include <linux/vmstat.h>
 #include <linux/fault-inject.h>
@@ -4610,7 +4611,7 @@ check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
         */
        if (cpusets_enabled() && ac->nodemask &&
                        !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
-               ac->nodemask = NULL;
+               ac->nodemask = default_sysram_nodes;
                return true;
        }
 
@@ -4794,7 +4795,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         * user oriented.
         */
        if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
-               ac->nodemask = NULL;
+               ac->nodemask = default_sysram_nodes;
                ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
                                        ac->highest_zoneidx, ac->nodemask);
        }
@@ -4946,7 +4947,8 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
                        ac->nodemask = &cpuset_current_mems_allowed;
                else
                        *alloc_flags |= ALLOC_CPUSET;
-       }
+       } else if (!ac->nodemask) /* sysram_nodes may be NULL during __init */
+               ac->nodemask = default_sysram_nodes;
 
        might_alloc(gfp_mask);
 
@@ -5190,8 +5192,10 @@ struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
        /*
         * Restore the original nodemask if it was potentially replaced with
         * &cpuset_current_mems_allowed to optimize the fast-path attempt.
+        *
+        * If not set, default to sysram nodes.
         */
-       ac.nodemask = nodemask;
+       ac.nodemask = nodemask ? nodemask : default_sysram_nodes;
 
        page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
 
diff --git a/mm/slub.c b/mm/slub.c
index d4367f25b20d..b8358a961c4c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -28,6 +28,7 @@
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
+#include <linux/memory-tiers.h>
 #include <linux/ctype.h>
 #include <linux/stackdepot.h>
 #include <linux/debugobjects.h>
@@ -3570,7 +3571,8 @@ static struct slab *get_any_partial(struct kmem_cache *s,
        do {
                cpuset_mems_cookie = read_mems_allowed_begin();
                zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
-               for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
+               for_each_zone_zonelist_nodemask(zone, z, zonelist, highest_zoneidx,
+                                               default_sysram_nodes) {
                        struct kmem_cache_node *n;
 
                        n = get_node(s, zone_to_nid(zone));
-- 
2.51.1


Reply via email to