We change the initialization of the skb transmit buffer queues
in the functions tipc_bcast_xmit() and tipc_rcast_xmit() so that
their spinlocks are initialized as well, i.e., we use
skb_queue_head_init() instead of __skb_queue_head_init(). This is
needed because we may, during error conditions, have to call
skb_queue_purge() on those queues further down the stack, and
skb_queue_purge() takes the queue's spinlock while dequeuing the
buffers.
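
For illustration only (not part of the patch): __skb_queue_head_init()
sets up only the queue's list head and length, while
skb_queue_head_init() additionally runs spin_lock_init() on the
queue's lock. Since skb_queue_purge() dequeues under that lock,
purging a queue set up with the unlocked variant operates on an
uninitialized spinlock:

	#include <linux/skbuff.h>

	static void example(void)
	{
		struct sk_buff_head xmitq;

		__skb_queue_head_init(&xmitq);	/* lock is never initialized */
		skb_queue_purge(&xmitq);	/* takes &xmitq.lock -> undefined */

		skb_queue_head_init(&xmitq);	/* spin_lock_init() + list init */
		skb_queue_purge(&xmitq);	/* safe, even on an empty queue */
	}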

Signed-off-by: Jon Maloy <jon.ma...@ericsson.com>
---
 net/tipc/bcast.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 7d99029..a140dd4 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -233,7 +233,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
        struct sk_buff_head xmitq;
        int rc = 0;
 
-       __skb_queue_head_init(&xmitq);
+       skb_queue_head_init(&xmitq);
        tipc_bcast_lock(net);
        if (tipc_link_bc_peers(l))
                rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -263,7 +263,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
        u32 dst, selector;
 
        selector = msg_link_selector(buf_msg(skb_peek(pkts)));
-       __skb_queue_head_init(&_pkts);
+       skb_queue_head_init(&_pkts);
 
        list_for_each_entry_safe(n, tmp, &dests->list, list) {
                dst = n->value;
-- 
2.1.4