On Wed, May 30, 2018 at 11:01 AM, Jesper Dangaard Brouer <bro...@redhat.com> wrote: > The XDP_REDIRECT map devmap can avoid using ndo_xdp_flush, by instead > instructing ndo_xdp_xmit to flush via XDP_XMIT_FLUSH flag in > appropriate places. > > Notice after this patch it is possible to remove ndo_xdp_flush > completely, as this is the last user of ndo_xdp_flush. This is left > for later patches, to keep driver changes separate. > > Signed-off-by: Jesper Dangaard Brouer <bro...@redhat.com> > --- > kernel/bpf/devmap.c | 20 +++++++------------- > 1 file changed, 7 insertions(+), 13 deletions(-) > > diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c > index 04fbd75a5274..9c846a7a8cff 100644 > --- a/kernel/bpf/devmap.c > +++ b/kernel/bpf/devmap.c > @@ -217,7 +217,7 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit) > } > > static int bq_xmit_all(struct bpf_dtab_netdev *obj, > - struct xdp_bulk_queue *bq) > + struct xdp_bulk_queue *bq, bool flush)
How about using "int flags" instead of "bool flush", so this is easier to extend later? Thanks, Song > { > struct net_device *dev = obj->dev; > int sent = 0, drops = 0, err = 0; > @@ -232,7 +232,8 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj, > prefetch(xdpf); > } > > - sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, 0); > + sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, > + flush ? XDP_XMIT_FLUSH : 0); > if (sent < 0) { > err = sent; > sent = 0; > @@ -276,7 +277,6 @@ void __dev_map_flush(struct bpf_map *map) > for_each_set_bit(bit, bitmap, map->max_entries) { > struct bpf_dtab_netdev *dev = > READ_ONCE(dtab->netdev_map[bit]); > struct xdp_bulk_queue *bq; > - struct net_device *netdev; > > /* This is possible if the dev entry is removed by user space > * between xdp redirect and flush op. > @@ -287,10 +287,7 @@ void __dev_map_flush(struct bpf_map *map) > __clear_bit(bit, bitmap); > > bq = this_cpu_ptr(dev->bulkq); > - bq_xmit_all(dev, bq); > - netdev = dev->dev; > - if (likely(netdev->netdev_ops->ndo_xdp_flush)) > - netdev->netdev_ops->ndo_xdp_flush(netdev); > + bq_xmit_all(dev, bq, true); > } > } > > @@ -320,7 +317,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct > xdp_frame *xdpf, > struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq); > > if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) > - bq_xmit_all(obj, bq); > + bq_xmit_all(obj, bq, false); > > /* Ingress dev_rx will be the same for all xdp_frame's in > * bulk_queue, because bq stored per-CPU and must be flushed > @@ -359,8 +356,7 @@ static void *dev_map_lookup_elem(struct bpf_map *map, > void *key) > > static void dev_map_flush_old(struct bpf_dtab_netdev *dev) > { > - if (dev->dev->netdev_ops->ndo_xdp_flush) { > - struct net_device *fl = dev->dev; > + if (dev->dev->netdev_ops->ndo_xdp_xmit) { > struct xdp_bulk_queue *bq; > unsigned long *bitmap; > > @@ -371,9 +367,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev) > __clear_bit(dev->bit, bitmap); > > bq = 
per_cpu_ptr(dev->bulkq, cpu); > - bq_xmit_all(dev, bq); > - > - fl->netdev_ops->ndo_xdp_flush(dev->dev); > + bq_xmit_all(dev, bq, true); > } > } > } >