On Fri, Mar 4, 2016 at 3:01 PM, Jesper Dangaard Brouer <bro...@redhat.com> wrote:
> /* build_skb() is wrapper over __build_skb(), that specifically
>  * takes care of skb->head and skb->pfmemalloc
>  * This means that if @frag_size is not zero, then @data must be backed
> @@ -490,8 +500,8 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
>
>  	len += NET_SKB_PAD + NET_IP_ALIGN;
>
> -	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
> -	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
> +	if (unlikely((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
> +		     (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA)))) {

Why unlikely? I know it is better for the common case, where a linear SKB is
most likely << SKB_WITH_OVERHEAD(PAGE_SIZE) — but what about the case of
hardware LRO, where the linear SKB is likely to be >> SKB_WITH_OVERHEAD(PAGE_SIZE)?

>  		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
>  		if (!skb)
>  			goto skb_fail;
> @@ -508,11 +518,20 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
>  	if (unlikely(!data))
>  		return NULL;
>
> -	skb = __build_skb(data, len);
> -	if (unlikely(!skb)) {
> +#define BULK_ALLOC_SIZE 8
> +	if (!nc->skb_count) {
> +		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
> +						      gfp_mask, BULK_ALLOC_SIZE,
> +						      nc->skb_cache);
> +	}
> +	if (likely(nc->skb_count)) {
> +		skb = (struct sk_buff *)nc->skb_cache[--nc->skb_count];
> +	} else {
> +		/* alloc bulk failed */
>  		skb_free_frag(data);
>  		return NULL;
>  	}
> +	skb = ___build_skb(data, len, skb);
>
>  	/* use OR instead of assignment to avoid clearing of bits in mask */
>  	if (nc->page.pfmemalloc)
>