In ifmcaddr6->mca_flags, there are the following flags:
MAF_TIMER_RUNNING
MAF_LAST_REPORTER
MAF_LOADED
MAF_NOREPORT
MAF_GSQUERY
The mca_flags value will be protected by a spinlock in the following patches.
But MAF_LOADED and MAF_NOREPORT do not need the spinlock because they will be
protected by RTNL. So, by separating them from mca_flags, the amount of atomic
context can be reduced.

Suggested-by: Cong Wang <xiyou.wangc...@gmail.com>
Signed-off-by: Taehee Yoo <ap420...@gmail.com>
---
v1 -> v2:
 - Separated from the previous single big patch.

 include/net/if_inet6.h |  8 ++++----
 net/ipv6/mcast.c       | 26 +++++++++++---------------
 2 files changed, 15 insertions(+), 19 deletions(-)

diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index af5244c9ca5c..bec372283ac0 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -107,9 +107,7 @@ struct ip6_sf_list {
 
 #define MAF_TIMER_RUNNING	0x01
 #define MAF_LAST_REPORTER	0x02
-#define MAF_LOADED		0x04
-#define MAF_NOREPORT		0x08
-#define MAF_GSQUERY		0x10
+#define MAF_GSQUERY		0x04
 
 struct ifmcaddr6 {
 	struct in6_addr		mca_addr;
@@ -121,7 +119,9 @@ struct ifmcaddr6 {
 	unsigned char		mca_crcount;
 	unsigned long		mca_sfcount[2];
 	struct delayed_work	mca_work;
-	unsigned int		mca_flags;
+	unsigned char		mca_flags;
+	bool			mca_noreport;
+	bool			mca_loaded;
 	int			mca_users;
 	refcount_t		mca_refcnt;
 	spinlock_t		mca_lock;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 80597dc56f2a..1f7fd3fbb4b6 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -661,15 +661,13 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
 	    IPV6_ADDR_SCOPE_LINKLOCAL)
 		return;
 
-	spin_lock_bh(&mc->mca_lock);
-	if (!(mc->mca_flags&MAF_LOADED)) {
-		mc->mca_flags |= MAF_LOADED;
+	if (!mc->mca_loaded) {
+		mc->mca_loaded = true;
 		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 			dev_mc_add(dev, buf);
 	}
-	spin_unlock_bh(&mc->mca_lock);
 
-	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
+	if (!(dev->flags & IFF_UP) || mc->mca_noreport)
 		return;
 
 	if (mld_in_v1_mode(mc->idev)) {
@@ -697,15 +695,13 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
 	    IPV6_ADDR_SCOPE_LINKLOCAL)
 		return;
 
-	spin_lock_bh(&mc->mca_lock);
-	if (mc->mca_flags&MAF_LOADED) {
-		mc->mca_flags &= ~MAF_LOADED;
+	if (mc->mca_loaded) {
+		mc->mca_loaded = false;
 		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
 			dev_mc_del(dev, buf);
 	}
-	spin_unlock_bh(&mc->mca_lock);
 
-	if (mc->mca_flags & MAF_NOREPORT)
+	if (mc->mca_noreport)
 		return;
 
 	if (!mc->idev->dead)
@@ -868,7 +864,7 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
 
 	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
 	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
-		mc->mca_flags |= MAF_NOREPORT;
+		mc->mca_noreport = true;
 
 	return mc;
 }
@@ -1733,7 +1729,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 	int scount, stotal, first, isquery, truncate;
 	unsigned int mtu;
 
-	if (pmc->mca_flags & MAF_NOREPORT)
+	if (pmc->mca_noreport)
 		return skb;
 
 	mtu = READ_ONCE(dev->mtu);
@@ -1855,7 +1851,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
 	read_lock_bh(&idev->lock);
 	if (!pmc) {
 		for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
-			if (pmc->mca_flags & MAF_NOREPORT)
+			if (pmc->mca_noreport)
 				continue;
 			spin_lock_bh(&pmc->mca_lock);
 			if (pmc->mca_sfcount[MCAST_EXCLUDE])
@@ -2149,7 +2145,7 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
 			psf_prev->sf_next = psf->sf_next;
 		else
 			pmc->mca_sources = psf->sf_next;
-		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
+		if (psf->sf_oldin && !pmc->mca_noreport &&
 		    !mld_in_v1_mode(idev)) {
 			psf->sf_crcount = idev->mc_qrv;
 			psf->sf_next = pmc->mca_tomb;
@@ -2410,7 +2406,7 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
 {
 	unsigned long delay;
 
-	if (ma->mca_flags & MAF_NOREPORT)
+	if (ma->mca_noreport)
 		return;
 
 	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
-- 
2.17.1
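
(Note, not part of the patch: a minimal sketch of the access pattern this
change assumes. example_group_added() is a hypothetical stand-in for callers
such as igmp6_group_added(); ASSERT_RTNL() is only there to document the
assumed RTNL protection of the new bool fields.)

/* Illustrative only: with mca_loaded and mca_noreport moved out of
 * mca_flags, RTNL-protected paths can test and update them as plain
 * booleans, without taking mca_lock via spin_lock_bh().
 */
static void example_group_added(struct ifmcaddr6 *mc)
{
	ASSERT_RTNL();			/* documents the assumed locking context */

	if (!mc->mca_loaded) {		/* plain bool, protected by RTNL */
		mc->mca_loaded = true;
		/* ... ndisc_mc_map()/dev_mc_add() as in igmp6_group_added() ... */
	}

	if (mc->mca_noreport)		/* no atomic flag test needed */
		return;

	/* ... schedule/send the MLD report ... */
}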