On Wed, 2016-08-17 at 12:36 -0700, John Fastabend wrote:
> The qdisc_reset operation depends on the qdisc lock at the moment
> to halt any additions to gso_skb and statistics while the list is
> free'd and the stats zeroed.

...
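
(For context: per the changelog, callers today serialize qdisc_reset()
under the qdisc root lock, roughly like this untested sketch:

	spinlock_t *root_lock = qdisc_lock(qdisc);

	spin_lock_bh(root_lock);
	qdisc_reset(qdisc);		/* gso_skb / stats torn down here */
	spin_unlock_bh(root_lock);

which is what keeps gso_skb and the statistics stable while they are
freed and zeroed.)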

> Signed-off-by: John Fastabend <john.r.fastab...@intel.com>
> ---
>  net/sched/sch_generic.c |   45 +++++++++++++++++++++++++++++++++++----------
>  1 file changed, 35 insertions(+), 10 deletions(-)
> 
> diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
> index 3b9a21f..29238c4 100644
> --- a/net/sched/sch_generic.c
> +++ b/net/sched/sch_generic.c
> @@ -737,6 +737,20 @@ void qdisc_reset(struct Qdisc *qdisc)
>       kfree_skb(qdisc->skb_bad_txq);
>       qdisc->skb_bad_txq = NULL;
>  
> +     if (qdisc->gso_cpu_skb) {
> +             int i;
> +
> +             for_each_possible_cpu(i) {
> +                     struct gso_cell *cell;
> +
> +                     cell = per_cpu_ptr(qdisc->gso_cpu_skb, i);
> +                     if (cell) {
> +                             kfree_skb_list(cell->skb);
> +                             cell = NULL;

        You probably wanted:
                        cell->skb = NULL;
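
i.e., with only that one line changed, the loop would read (untested
sketch, the NULL check kept as in your patch):

	if (qdisc->gso_cpu_skb) {
		int i;

		for_each_possible_cpu(i) {
			struct gso_cell *cell;

			cell = per_cpu_ptr(qdisc->gso_cpu_skb, i);
			if (cell) {
				kfree_skb_list(cell->skb);
				/* clear the per-cpu slot, not the
				 * local pointer
				 */
				cell->skb = NULL;
			}
		}
	}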


> +                     }
> +             }
> +     }
> +
>       if (qdisc->gso_skb) {
>               kfree_skb_list(qdisc->gso_skb);
>               qdisc->gso_skb = NULL;
> @@ -812,10 +826,6 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
>       root_lock = qdisc_lock(oqdisc);
>       spin_lock_bh(root_lock);
>  
> -     /* Prune old scheduler */
> -     if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
> -             qdisc_reset(oqdisc);
> -
>  

This probably belongs in a separate patch, before any of the per-cpu /
lockless qdisc changes?
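
That way the removal can be reviewed and bisected on its own, e.g. a
preliminary patch containing just this hunk (sketch only, offsets
approximate):

	--- a/net/sched/sch_generic.c
	+++ b/net/sched/sch_generic.c
	@@ -812,10 +812,6 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
	 	root_lock = qdisc_lock(oqdisc);
	 	spin_lock_bh(root_lock);
	 
	-	/* Prune old scheduler */
	-	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
	-		qdisc_reset(oqdisc);
	-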


