From: Eric Dumazet <eric.duma...@gmail.com>
Date: Sat, 7 Jul 2018 17:22:01 -0700
> Maybe gro_count should be replaced by a bitmask, so that we can
> speed up napi_gro_flush(), since it now has to use 3 cache lines
> (gro_hash[] size is 192 bytes)

Something like this?

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b683971e500d..a4d859a7e9de 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -310,7 +310,7 @@ struct gro_list {
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
  */
-#define GRO_HASH_BUCKETS	8
+#define GRO_HASH_BUCKETS	8	/* Must be <= 32 due to gro_hash_mask */

 struct napi_struct {
 	/* The poll_list must only be managed by the entity which
 	 * changes the state of the NAPI_STATE_SCHED bit. This means
@@ -322,7 +322,7 @@ struct napi_struct {

 	unsigned long	state;
 	int		weight;
-	unsigned int	gro_count;
+	unsigned int	gro_hash_mask;
 	int		(*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
 	int		poll_owner;
diff --git a/net/core/dev.c b/net/core/dev.c
index 89825c1eccdc..0dfb84a82586 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5158,12 +5158,14 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,

 	list_for_each_entry_safe_reverse(skb, p, head, list) {
 		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
-			return;
+			goto out;
 		list_del_init(&skb->list);
 		napi_gro_complete(skb);
-		napi->gro_count--;
 		napi->gro_hash[index].count--;
 	}
+out:
+	if (list_empty(head))
+		napi->gro_hash_mask &= ~(1 << index);
 }

 /* napi->gro_hash[].list contains packets ordered by age.
@@ -5174,8 +5176,10 @@ void napi_gro_flush(struct napi_struct *napi, bool flush_old)
 {
 	u32 i;

-	for (i = 0; i < GRO_HASH_BUCKETS; i++)
-		__napi_gro_flush_chain(napi, i, flush_old);
+	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
+		if (napi->gro_hash_mask & (1 << i))
+			__napi_gro_flush_chain(napi, i, flush_old);
+	}
 }
 EXPORT_SYMBOL(napi_gro_flush);

@@ -5267,8 +5271,8 @@ static void gro_flush_oldest(struct list_head *head)
 	if (WARN_ON_ONCE(!oldest))
 		return;

-	/* Do not adjust napi->gro_count, caller is adding a new SKB to
-	 * the chain.
+	/* Do not adjust napi->gro_hash_mask, caller is adding a new
+	 * SKB to the chain.
 	 */
 	list_del(&oldest->list);
 	napi_gro_complete(oldest);
@@ -5342,8 +5346,9 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	if (pp) {
 		list_del_init(&pp->list);
 		napi_gro_complete(pp);
-		napi->gro_count--;
 		napi->gro_hash[hash].count--;
+		if (list_empty(&napi->gro_hash[hash].list))
+			napi->gro_hash_mask &= ~(1 << hash);
 	}

 	if (same_flow)
@@ -5355,7 +5360,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
 		gro_flush_oldest(gro_head);
 	} else {
-		napi->gro_count++;
+		napi->gro_hash_mask |= (1 << hash);
 		napi->gro_hash[hash].count++;
 	}
 	NAPI_GRO_CB(skb)->count = 1;
@@ -5768,7 +5773,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 			    NAPIF_STATE_IN_BUSY_POLL)))
 		return false;

-	if (n->gro_count) {
+	if (n->gro_hash_mask) {
 		unsigned long timeout = 0;

 		if (work_done)
@@ -5977,7 +5982,7 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
 	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
 	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
 	 */
-	if (napi->gro_count && !napi_disable_pending(napi) &&
+	if (napi->gro_hash_mask && !napi_disable_pending(napi) &&
 	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
 		__napi_schedule_irqoff(napi);

@@ -5992,7 +5997,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 	INIT_LIST_HEAD(&napi->poll_list);
 	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
 	napi->timer.function = napi_watchdog;
-	napi->gro_count = 0;
+	napi->gro_hash_mask = 0;
 	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
 		INIT_LIST_HEAD(&napi->gro_hash[i].list);
 		napi->gro_hash[i].count = 0;
@@ -6052,7 +6057,7 @@ void netif_napi_del(struct napi_struct *napi)
 		napi_free_frags(napi);

 	flush_gro_hash(napi);
-	napi->gro_count = 0;
+	napi->gro_hash_mask = 0;
 }
 EXPORT_SYMBOL(netif_napi_del);

@@ -6094,7 +6099,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 		goto out_unlock;
 	}

-	if (n->gro_count) {
+	if (n->gro_hash_mask) {
 		/* flush too old packets
 		 * If HZ < 1000, flush all packets.
 		 */