1. Add a local variable bytes_compl to collect transmitted bytes
   - dev->stats updates can then be moved outside the while-loop
   - bytes_compl is also needed for future BQL support (a rough sketch
     follows this list)
2. When the bcmgenet device uses Tx checksum offload, each transmitted
   skb has an extra 64-byte header prepended to it. This header must be
   deducted when counting transmitted bytes.
3. skb->len covers the entire length of the skb, whether it is linear or
   fragmented. Thus, when we clean the fragments, do not increment the
   transmitted byte count again.
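
For reference, here is a rough sketch of how pkts_compl/bytes_compl could
later be hooked into BQL. This is illustrative only and not part of this
patch; it assumes the standard BQL helpers from <linux/netdevice.h> and the
txq/ring variables already used in __bcmgenet_tx_reclaim() and
bcmgenet_xmit():

        /* In the reclaim path, right after the dev->stats update: */
        txq = netdev_get_tx_queue(dev, ring->queue);
        netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

        /* In bcmgenet_xmit(), before the 64-byte TSB is prepended, so the
         * byte count matches bytes_compl at completion time: */
        netdev_tx_sent_queue(txq, skb->len);

        /* When (re)initializing a TX ring, to clear stale BQL state: */
        netdev_tx_reset_queue(netdev_get_tx_queue(dev, ring->queue));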

Signed-off-by: Petri Gynther <pgynt...@google.com>
---
 drivers/net/ethernet/broadcom/genet/bcmgenet.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6746fd0..9990582 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1171,6 +1171,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
        struct enet_cb *tx_cb_ptr;
        struct netdev_queue *txq;
        unsigned int pkts_compl = 0;
+       unsigned int bytes_compl = 0;
        unsigned int c_index;
        unsigned int txbds_ready;
        unsigned int txbds_processed = 0;
@@ -1193,16 +1194,14 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
                tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
                if (tx_cb_ptr->skb) {
                        pkts_compl++;
-                       dev->stats.tx_packets++;
-                       dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+                       bytes_compl += tx_cb_ptr->skb->len -
+                                      (priv->desc_64b_en ? 64 : 0);
                        dma_unmap_single(&dev->dev,
                                         dma_unmap_addr(tx_cb_ptr, dma_addr),
                                         dma_unmap_len(tx_cb_ptr, dma_len),
                                         DMA_TO_DEVICE);
                        bcmgenet_free_cb(tx_cb_ptr);
                } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
-                       dev->stats.tx_bytes +=
-                               dma_unmap_len(tx_cb_ptr, dma_len);
                        dma_unmap_page(&dev->dev,
                                       dma_unmap_addr(tx_cb_ptr, dma_addr),
                                       dma_unmap_len(tx_cb_ptr, dma_len),
@@ -1220,6 +1219,9 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
        ring->free_bds += txbds_processed;
        ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
 
+       dev->stats.tx_packets += pkts_compl;
+       dev->stats.tx_bytes += bytes_compl;
+
        if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
                txq = netdev_get_tx_queue(dev, ring->queue);
                if (netif_tx_queue_stopped(txq))
-- 
2.8.0.rc3.226.g39d4020
