Introduce skb_blist and the NETIF_F_BATCH_SKBS feature flag, and use a
single xmit API for both batching and non-batching drivers.
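
A driver opts in by advertising the new feature flag before registration;
register_netdevice() then allocates and initialises dev->skb_blist on its
behalf. A minimal, illustrative fragment (the "foo" driver and its init
routine are hypothetical, not part of this patch):

	/* Hypothetical "foo" driver: advertise skb batching support so
	 * that register_netdevice() allocates dev->skb_blist for us.
	 */
	static int foo_init_netdev(struct net_device *dev)
	{
		dev->features |= NETIF_F_BATCH_SKBS;
		return register_netdevice(dev);
	}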

Signed-off-by: Krishna Kumar <[EMAIL PROTECTED]>
---
 include/linux/netdevice.h |    8 ++++++--
 net/core/dev.c            |   29 ++++++++++++++++++++++++++---
 2 files changed, 32 insertions(+), 5 deletions(-)
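
Note (illustration only, not part of this patch): with the single xmit API,
dev->hard_start_xmit() may now be called with a NULL skb, presumably so that
a batching-aware driver can drain dev->skb_blist itself. A rough sketch of
how such a driver's xmit routine might look (foo_tx_one() is a hypothetical
per-skb send helper):

	static int foo_hard_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
	{
		if (!skb) {
			/* Batch notification: transmit everything queued
			 * on dev->skb_blist.
			 */
			while ((skb = __skb_dequeue(dev->skb_blist)) != NULL)
				foo_tx_one(skb, dev);
			return NETDEV_TX_OK;
		}

		/* Normal single-skb path */
		return foo_tx_one(skb, dev);
	}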

diff -ruNp org/include/linux/netdevice.h new/include/linux/netdevice.h
--- org/include/linux/netdevice.h       2007-09-13 09:11:09.000000000 +0530
+++ new/include/linux/netdevice.h       2007-09-14 10:26:21.000000000 +0530
@@ -439,10 +439,11 @@ struct net_device
 #define NETIF_F_NETNS_LOCAL    8192    /* Does not change network namespaces */
 #define NETIF_F_MULTI_QUEUE    16384   /* Has multiple TX/RX queues */
 #define NETIF_F_LRO            32768   /* large receive offload */
+#define NETIF_F_BATCH_SKBS     65536   /* Driver supports multiple skbs/xmit */
 
        /* Segmentation offload features */
-#define NETIF_F_GSO_SHIFT      16
-#define NETIF_F_GSO_MASK       0xffff0000
+#define NETIF_F_GSO_SHIFT      17
+#define NETIF_F_GSO_MASK       0xfffe0000
 #define NETIF_F_TSO            (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
 #define NETIF_F_UFO            (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
 #define NETIF_F_GSO_ROBUST     (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
@@ -548,6 +549,9 @@ struct net_device
        /* Partially transmitted GSO packet. */
        struct sk_buff          *gso_skb;
 
+       /* List of batch skbs (optional, used if driver supports skb batching) */
+       struct sk_buff_head     *skb_blist;
+
        /* ingress path synchronizer */
        spinlock_t              ingress_lock;
        struct Qdisc            *qdisc_ingress;
diff -ruNp org/net/core/dev.c new/net/core/dev.c
--- org/net/core/dev.c  2007-09-14 10:24:27.000000000 +0530
+++ new/net/core/dev.c  2007-09-14 10:25:36.000000000 +0530
@@ -953,6 +953,16 @@ void netdev_state_change(struct net_devi
        }
 }
 
+static void free_batching(struct net_device *dev)
+{
+       if (dev->skb_blist) {
+               if (!skb_queue_empty(dev->skb_blist))
+                       skb_queue_purge(dev->skb_blist);
+               kfree(dev->skb_blist);
+               dev->skb_blist = NULL;
+       }
+}
+
 /**
  *     dev_load        - load a network module
  *     @name: name of interface
@@ -1534,7 +1544,10 @@ static int dev_gso_segment(struct sk_buf
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       if (likely(!skb->next)) {
+       if (likely(skb)) {
+               if (unlikely(skb->next))
+                       goto gso;
+
                if (!list_empty(&ptype_all))
                        dev_queue_xmit_nit(skb, dev);
 
@@ -1544,10 +1557,10 @@ int dev_hard_start_xmit(struct sk_buff *
                        if (skb->next)
                                goto gso;
                }
-
-               return dev->hard_start_xmit(skb, dev);
        }
 
+       return dev->hard_start_xmit(skb, dev);
+
 gso:
        do {
                struct sk_buff *nskb = skb->next;
@@ -3566,6 +3579,13 @@ int register_netdevice(struct net_device
                }
        }
 
+       if (dev->features & NETIF_F_BATCH_SKBS) {
+               /* Driver supports skb batching */
+               dev->skb_blist = kmalloc(sizeof *dev->skb_blist, GFP_KERNEL);
+               if (dev->skb_blist)
+                       skb_queue_head_init(dev->skb_blist);
+       }
+
        /*
         *      nil rebuild_header routine,
         *      that should be never called and used as just bug trap.
@@ -3901,6 +3921,9 @@ void unregister_netdevice(struct net_dev
 
        synchronize_net();
 
+       /* Deallocate batching structure */
+       free_batching(dev);
+
        /* Shutdown queueing discipline. */
        dev_shutdown(dev);
 