In the rx fast path, netdev_alloc_skb() rarely fails. Annotate the
allocation check with likely() so the compiler optimizes code layout
for the common (successful) case.
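
For context (not part of the patch itself): in the kernel, likely() and
unlikely() are defined in include/linux/compiler.h on top of GCC's
__builtin_expect(), which tells the compiler which branch outcome to
treat as the common case so the hot path stays on the fall-through
code. Below is a minimal userspace sketch, assuming GCC or Clang, with
the macro definitions inlined purely for illustration; alloc_buf() is a
hypothetical stand-in for an allocation that almost always succeeds,
much like netdev_alloc_skb() in the rx ring refill loop.

#include <stdio.h>
#include <stdlib.h>

/* Same shape as the kernel's definitions in <linux/compiler.h>. */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical allocator that rarely fails. */
static void *alloc_buf(size_t len)
{
	return malloc(len);
}

int main(void)
{
	void *buf = alloc_buf(2048);

	if (likely(buf)) {
		/* Expected-true branch: the compiler is hinted to keep
		 * this block on the straight-line (fall-through) path. */
		puts("allocation succeeded");
		free(buf);
		return 0;
	}

	/* Rare failure path; the hint lets the compiler move it
	 * out of the hot code layout. */
	puts("allocation failed");
	return 1;
}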

CC: Srinivas Eeda <srinivas.e...@oracle.com>
CC: Joe Jin <joe....@oracle.com>
CC: Junxiao Bi <junxiao...@oracle.com>
Signed-off-by: Zhu Yanjun <yanjun....@oracle.com>
---
 drivers/net/ethernet/nvidia/forcedeth.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 49d6d78..a79b9f8 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1817,7 +1817,7 @@ static int nv_alloc_rx(struct net_device *dev)
 
        while (np->put_rx.orig != less_rx) {
                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
-               if (skb) {
+               if (likely(skb)) {
                        np->put_rx_ctx->skb = skb;
                        np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
                                                             skb->data,
@@ -1858,7 +1858,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 
        while (np->put_rx.ex != less_rx) {
                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
-               if (skb) {
+               if (likely(skb)) {
                        np->put_rx_ctx->skb = skb;
                        np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
                                                             skb->data,
-- 
2.7.4
