Under the new locking rules, tcf_lock must be taken in both ->init() and
->dump(), as the dependency on the RTNL lock is being removed.

Use the tcf spinlock to protect private nat action data from concurrent
modification during dump. (nat init already takes the tcf spinlock when
changing action state.)

Signed-off-by: Vlad Buslov <vla...@mellanox.com>
---
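Note: for context, the update side already serializes on the same lock;
tcf_nat_init() changes the action parameters under tcf_lock roughly as
follows (abbreviated sketch only, with attribute parsing and error
handling omitted, so not the verbatim init path):

	p = to_tcf_nat(*a);

	/* exclude concurrent readers (dump) while updating parameters */
	spin_lock_bh(&p->tcf_lock);
	p->old_addr = parm->old_addr;
	p->new_addr = parm->new_addr;
	p->mask = parm->mask;
	p->flags = parm->flags;
	p->tcf_action = parm->action;
	spin_unlock_bh(&p->tcf_lock);

Taking tcf_lock in the dump path below therefore yields a consistent
snapshot of old_addr/new_addr/mask/flags/tcf_action against a
concurrent ->init().
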
 net/sched/act_nat.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d98f33fdffe2..c5c1e23add77 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -256,28 +256,31 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_nat *p = to_tcf_nat(a);
        struct tc_nat opt = {
-               .old_addr = p->old_addr,
-               .new_addr = p->new_addr,
-               .mask     = p->mask,
-               .flags    = p->flags,
-
                .index    = p->tcf_index,
-               .action   = p->tcf_action,
                .refcnt   = refcount_read(&p->tcf_refcnt) - ref,
                .bindcnt  = atomic_read(&p->tcf_bindcnt) - bind,
        };
        struct tcf_t t;
 
+       spin_lock_bh(&p->tcf_lock);
+       opt.old_addr = p->old_addr;
+       opt.new_addr = p->new_addr;
+       opt.mask = p->mask;
+       opt.flags = p->flags;
+       opt.action = p->tcf_action;
+
        if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
 
        tcf_tm_dump(&t, &p->tcf_tm);
        if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
                goto nla_put_failure;
+       spin_unlock_bh(&p->tcf_lock);
 
        return skb->len;
 
 nla_put_failure:
+       spin_unlock_bh(&p->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
 }
-- 
2.7.5
