Callers can use this feedback to be more aggressive in making space for
allocations of a cgroup if they know it is protected.

These are counterparts to memcg's mem_cgroup_below_{min,low}.
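
A minimal sketch of how an eviction path might consume these helpers.
try_evict_for() and evict_one_buffer() are hypothetical placeholders for
driver-specific logic; only the dmem_cgroup_below_{min,low}() calls come
from this patch, and passing NULL as @root would evaluate protection
against the whole hierarchy instead of a subtree:

  static bool try_evict_for(struct dmem_cgroup_pool_state *limit_pool,
                            struct dmem_cgroup_pool_state *victim_pool)
  {
          /* Never reclaim from a pool whose usage is under its min protection. */
          if (dmem_cgroup_below_min(limit_pool, victim_pool))
                  return false;

          /* Usage under the low protection: reclaim only as a last resort. */
          if (dmem_cgroup_below_low(limit_pool, victim_pool))
                  return evict_one_buffer(victim_pool, true /* last_resort */);

          return evict_one_buffer(victim_pool, false /* last_resort */);
  }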

Signed-off-by: Natalie Vock <[email protected]>
---
 include/linux/cgroup_dmem.h | 16 ++++++++++++
 kernel/cgroup/dmem.c        | 62 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 78 insertions(+)

diff --git a/include/linux/cgroup_dmem.h b/include/linux/cgroup_dmem.h
index dd4869f1d736e..1a88cd0c9eb00 100644
--- a/include/linux/cgroup_dmem.h
+++ b/include/linux/cgroup_dmem.h
@@ -24,6 +24,10 @@ void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size);
 bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool,
                                      struct dmem_cgroup_pool_state *test_pool,
                                      bool ignore_low, bool *ret_hit_low);
+bool dmem_cgroup_below_min(struct dmem_cgroup_pool_state *root,
+                          struct dmem_cgroup_pool_state *test);
+bool dmem_cgroup_below_low(struct dmem_cgroup_pool_state *root,
+                          struct dmem_cgroup_pool_state *test);
 
 void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool);
 #else
@@ -59,6 +63,18 @@ bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool,
        return true;
 }
 
+static inline bool dmem_cgroup_below_min(struct dmem_cgroup_pool_state *root,
+                                        struct dmem_cgroup_pool_state *test)
+{
+       return false;
+}
+
+static inline bool dmem_cgroup_below_low(struct dmem_cgroup_pool_state *root,
+                                        struct dmem_cgroup_pool_state *test)
+{
+       return false;
+}
+
 static inline void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool)
 { }
 
diff --git a/kernel/cgroup/dmem.c b/kernel/cgroup/dmem.c
index 10b63433f0573..314c37c06f81e 100644
--- a/kernel/cgroup/dmem.c
+++ b/kernel/cgroup/dmem.c
@@ -641,6 +641,68 @@ int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size,
 }
 EXPORT_SYMBOL_GPL(dmem_cgroup_try_charge);
 
+/**
+ * dmem_cgroup_below_min() - Tests whether current usage is within min limit.
+ *
+ * @root: Root of the subtree to calculate protection for, or NULL to calculate global protection.
+ * @test: The pool to test the usage/min limit of.
+ *
+ * Return: true if usage is below min and the cgroup is protected, false otherwise.
+ */
+bool dmem_cgroup_below_min(struct dmem_cgroup_pool_state *root,
+                          struct dmem_cgroup_pool_state *test)
+{
+       if (root == test || !pool_parent(test))
+               return false;
+
+       if (!root) {
+               for (root = test; pool_parent(root); root = pool_parent(root))
+                       {}
+       }
+
+       /*
+        * mem_cgroup_below_min(), the memcg counterpart, does not make this call:
+        * it is invoked during traversal of the cgroup tree, where effective
+        * protection has already been calculated as part of the traversal. dmem
+        * cgroup eviction does not traverse the cgroup tree, so we need to
+        * recalculate the effective protection here.
+        */
+       dmem_cgroup_calculate_protection(root, test);
+       return page_counter_read(&test->cnt) <= READ_ONCE(test->cnt.emin);
+}
+EXPORT_SYMBOL_GPL(dmem_cgroup_below_min);
+
+/**
+ * dmem_cgroup_below_low() - Tests whether current usage is within low limit.
+ *
+ * @root: Root of the subtree to calculate protection for, or NULL to calculate global protection.
+ * @test: The pool to test the usage/low limit of.
+ *
+ * Return: true if usage is below low and the cgroup is protected, false otherwise.
+ */
+bool dmem_cgroup_below_low(struct dmem_cgroup_pool_state *root,
+                          struct dmem_cgroup_pool_state *test)
+{
+       if (root == test || !pool_parent(test))
+               return false;
+
+       if (!root) {
+               for (root = test; pool_parent(root); root = pool_parent(root))
+                       {}
+       }
+
+       /*
+        * mem_cgroup_below_low(), the memcg counterpart, does not make this call:
+        * it is invoked during traversal of the cgroup tree, where effective
+        * protection has already been calculated as part of the traversal. dmem
+        * cgroup eviction does not traverse the cgroup tree, so we need to
+        * recalculate the effective protection here.
+        */
+       dmem_cgroup_calculate_protection(root, test);
+       return page_counter_read(&test->cnt) <= READ_ONCE(test->cnt.elow);
+}
+EXPORT_SYMBOL_GPL(dmem_cgroup_below_low);
+
 static int dmem_cgroup_region_capacity_show(struct seq_file *sf, void *v)
 {
        struct dmem_cgroup_region *region;

-- 
2.51.2
