From: Mike Galbraith <umgwanakikb...@gmail.com>

send_msg() disables preemption to avoid out-of-order messages. The code
inside the preempt-disabled section acquires regular spinlocks, which
become 'sleeping' spinlocks on a PREEMPT_RT kernel, and eventually calls
into a memory allocator. Both may sleep on RT, which is not allowed in a
preempt-disabled section and thus conflicts with RT semantics.
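
For reference, the problematic pattern looks roughly like this (a
simplified sketch; the sleeping locks and the allocation happen inside
cn_netlink_send() and the allocator, not literally at this level):

	preempt_disable();
	/* ... */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
	/* -> internally takes regular spinlocks and allocates memory;
	 *    on PREEMPT_RT both may sleep, which is invalid inside a
	 *    preempt-disabled (atomic) section.
	 */
	preempt_enable();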

Convert it to a local_lock, which allows RT kernels to substitute it with
a real per-CPU lock. On non-RT kernels this maps to preempt_disable() as
before. No functional change.
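
Conceptually (ignoring lockdep annotations and the CPU-migration
handling), the primitive behaves as if it expanded like this; this is an
illustrative sketch, not the actual <linux/locallock.h> implementation:

	#ifdef CONFIG_PREEMPT_RT
	/* RT: take a real per-CPU lock; spinlock_t is rtmutex-based
	 * ('sleeping') here, so the section remains preemptible.
	 */
	# define local_lock(lvar)	spin_lock(this_cpu_ptr(&(lvar)))
	# define local_unlock(lvar)	spin_unlock(this_cpu_ptr(&(lvar)))
	#else
	/* !RT: identical to the code being replaced. */
	# define local_lock(lvar)	preempt_disable()
	# define local_unlock(lvar)	preempt_enable()
	#endif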

[bigeasy: Patch description]

Cc: Evgeniy Polyakov <z...@ioremap.net>
Cc: netdev@vger.kernel.org
Signed-off-by: Mike Galbraith <umgwanakikb...@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
---
 drivers/connector/cn_proc.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index d58ce664da843..055b0c86a0693 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -18,6 +18,7 @@
 #include <linux/pid_namespace.h>
 
 #include <linux/cn_proc.h>
+#include <linux/locallock.h>
 
 /*
  * Size of a cn_msg followed by a proc_event structure.  Since the
@@ -40,10 +41,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
 
 /* proc_event_counts is used as the sequence number of the netlink message */
 static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
+static DEFINE_LOCAL_LOCK(send_msg_lock);
 
 static inline void send_msg(struct cn_msg *msg)
 {
-       preempt_disable();
+       local_lock(send_msg_lock);
 
        msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
        ((struct proc_event *)msg->data)->cpu = smp_processor_id();
@@ -56,7 +58,7 @@ static inline void send_msg(struct cn_msg *msg)
         */
        cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
 
-       preempt_enable();
+       local_unlock(send_msg_lock);
 }
 
 void proc_fork_connector(struct task_struct *task)
-- 
2.26.2
