All current callers of __cpuset_zone_allowed() first check that
cpusets_enabled() is true - which is also the first check performed by
the cpuset_zone_allowed() wrapper. Convert these callers to use
cpuset_zone_allowed() directly and drop the redundant open-coded
cpusets_enabled() checks.

Signed-off-by: Gregory Price <[email protected]>
---
 mm/compaction.c |  7 +++----
 mm/page_alloc.c | 19 ++++++++-----------
 2 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 1e8f8eca318c..d2176935d3dd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2829,10 +2829,9 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
                                        ac->highest_zoneidx, ac->nodemask) {
                enum compact_result status;
 
-               if (cpusets_enabled() &&
-                       (alloc_flags & ALLOC_CPUSET) &&
-                       !__cpuset_zone_allowed(zone, gfp_mask))
-                               continue;
+               if ((alloc_flags & ALLOC_CPUSET) &&
+                   !cpuset_zone_allowed(zone, gfp_mask))
+                       continue;
 
                if (prio > MIN_COMPACT_PRIORITY
                                        && compaction_deferred(zone, order)) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fd5401fb5e00..bcaf1125d109 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3750,10 +3750,9 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                struct page *page;
                unsigned long mark;
 
-               if (cpusets_enabled() &&
-                       (alloc_flags & ALLOC_CPUSET) &&
-                       !__cpuset_zone_allowed(zone, gfp_mask))
-                               continue;
+               if ((alloc_flags & ALLOC_CPUSET) &&
+                   !cpuset_zone_allowed(zone, gfp_mask))
+                       continue;
                /*
                 * When allocating a page cache page for writing, we
                 * want to get it from a node that is within its dirty
@@ -4553,10 +4552,9 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
                unsigned long min_wmark = min_wmark_pages(zone);
                bool wmark;
 
-               if (cpusets_enabled() &&
-                       (alloc_flags & ALLOC_CPUSET) &&
-                       !__cpuset_zone_allowed(zone, gfp_mask))
-                               continue;
+               if ((alloc_flags & ALLOC_CPUSET) &&
+                   !cpuset_zone_allowed(zone, gfp_mask))
+                       continue;
 
                available = reclaimable = zone_reclaimable_pages(zone);
                available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
@@ -5052,10 +5050,9 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
         for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
                unsigned long mark;
 
-               if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
-                   !__cpuset_zone_allowed(zone, gfp)) {
+               if ((alloc_flags & ALLOC_CPUSET) &&
+                   !cpuset_zone_allowed(zone, gfp))
                        continue;
-               }
 
                if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
                    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
-- 
2.51.1


Reply via email to