Similarly to kref_put_lock(), decrement the refcount and, if it
reached 0, invoke the release callback with bh_lock_sock(sk) held.

This kref_put() variant comes in handy when some socket context
needs to be cleaned up atomically with the refcount dropping to 0.

Cc: Will Deacon <[email protected]> (maintainer:ATOMIC INFRASTRUCTURE)
Cc: Peter Zijlstra <[email protected]> (maintainer:ATOMIC INFRASTRUCTURE)
Cc: Boqun Feng <[email protected]> (reviewer:ATOMIC INFRASTRUCTURE)
Cc: Mark Rutland <[email protected]> (reviewer:ATOMIC INFRASTRUCTURE)
Cc: Andrew Morton <[email protected]>
Signed-off-by: Antonio Quartulli <[email protected]>
---
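
As an illustration (not part of this patch), a caller of the new
helper could look roughly like the sketch below. struct my_sock_ctx,
my_sock_ctx_release() and my_sock_ctx_put() are hypothetical names,
and the sketch assumes the convention that the release callback is
responsible for unlocking the sock:

  #include <linux/kref.h>
  #include <linux/slab.h>
  #include <net/sock.h>

  /* Hypothetical object tying a refcount to the socket it rides on. */
  struct my_sock_ctx {
          struct kref ref;
          struct sock *sk;
  };

  static void my_sock_ctx_release(struct kref *kref)
  {
          struct my_sock_ctx *ctx = container_of(kref, struct my_sock_ctx,
                                                 ref);
          struct sock *sk = ctx->sk;

          /* Runs with sk locked by kref_put_sock(): detach the context
           * while no softirq can observe it half torn down, then unlock
           * and free (assumes sk itself outlives this context).
           */
          rcu_assign_sk_user_data(sk, NULL);
          bh_unlock_sock(sk);
          kfree(ctx);
  }

  static void my_sock_ctx_put(struct my_sock_ctx *ctx)
  {
          /* On the final put the release above is invoked with the
           * sock locked, atomically with the refcount hitting 0.
           */
          kref_put_sock(&ctx->ref, my_sock_ctx_release, ctx->sk);
  }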
 include/linux/kref.h     | 11 +++++++++++
 include/linux/refcount.h |  3 +++
 lib/refcount.c           | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 46 insertions(+)

diff --git a/include/linux/kref.h b/include/linux/kref.h
index d32e21a2538c292452db99b915b1bb6c3ab15e53..68b11b0c9c0fdd17cf706e0eb15f74cfe2efa178 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -90,6 +90,17 @@ static inline int kref_put_lock(struct kref *kref,
        return 0;
 }
 
+static inline int kref_put_sock(struct kref *kref,
+                               void (*release)(struct kref *kref),
+                               struct sock *sock)
+{
+       if (refcount_dec_and_lock_sock(&kref->refcount, sock)) {
+               release(kref);
+               return 1;
+       }
+       return 0;
+}
+
 /**
  * kref_get_unless_zero - Increment refcount for object unless it is zero.
  * @kref: object.
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 35f039ecb2725618ca098e3515c6e19e2aece3ee..22698db1f24fdaf884a508b8a0cd44eb62194a9f 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -100,6 +100,7 @@
 #include <linux/spinlock_types.h>
 
 struct mutex;
+struct sock;
 
 #define REFCOUNT_INIT(n)       { .refs = ATOMIC_INIT(n), }
 #define REFCOUNT_MAX           INT_MAX
@@ -358,4 +359,6 @@ extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
                                                       spinlock_t *lock,
                                                       unsigned long *flags) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock_sock(refcount_t *r,
+                                                   struct sock *sock);
 #endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/refcount.c b/lib/refcount.c
index a207a8f22b3ca35890671e51c480266d89e4d8d6..76a728581aa49a41ef13f5141f3f2e9816d72e75 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -7,6 +7,7 @@
 #include <linux/refcount.h>
 #include <linux/spinlock.h>
 #include <linux/bug.h>
+#include <net/sock.h>
 
 #define REFCOUNT_WARN(str)     WARN_ONCE(1, "refcount_t: " str ".\n")
 
@@ -156,6 +157,37 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 }
 EXPORT_SYMBOL(refcount_dec_and_lock);
 
+/**
+ * refcount_dec_and_lock_sock - return holding locked sock if able to decrement
+ *                             refcount to 0
+ * @r: the refcount
+ * @sock: the sock to be locked
+ *
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold sock if able to decrement refcount to 0, false
+ *        otherwise
+ */
+bool refcount_dec_and_lock_sock(refcount_t *r, struct sock *sock)
+{
+       if (refcount_dec_not_one(r))
+               return false;
+
+       bh_lock_sock(sock);
+       if (!refcount_dec_and_test(r)) {
+               bh_unlock_sock(sock);
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_sock);
+
 /**
  * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
  *                                 interrupts if able to decrement refcount to 0

-- 
2.45.2

