[...]

>>  static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
>> @@ -685,17 +688,40 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
>>
>>  static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
>>  {
>> -       int prio;
>> +       unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
>>         struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
>> +       int prio;
>> +
>> +       /* guard against zero length rings */
>> +       if (!qlen)
>> +               return -EINVAL;
>>
>> -       for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
>> -               qdisc_skb_head_init(band2list(priv, prio));
>> +       for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
>> +               struct skb_array *q = band2list(priv, prio);
>> +               int err;
>> +
>> +               err = skb_array_init(q, qlen, GFP_KERNEL);
>> +               if (err)
>> +                       return -ENOMEM;
> 
> This relies on the caller calling ops->destroy on error to free partially
> allocated state. For uninitialized bands, that calls spin_lock on an
> uninitialized spinlock from skb_array_cleanup -> ptr_ring_cleanup ->
> ptr_ring_consume.

Nice catch, will fix in the next version. And I'll also make the above
suggested changes.

Thanks,
John.
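
For readers following the thread, one way the error path could be made safe
(a sketch, not necessarily the fix that went into the next revision) is to
have pfifo_fast_destroy() skip any band whose ring was never allocated. This
relies on the qdisc private area being zero-initialized when the Qdisc is
allocated, so a band that never reached skb_array_init() still has a NULL
ring.queue and its internal spinlocks were never initialized:

static void pfifo_fast_destroy(struct Qdisc *sch)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		/* If skb_array_init() failed (or was never reached) for
		 * this band in pfifo_fast_init(), the ring was never
		 * allocated and its locks were never initialized, so
		 * there is nothing to clean up here.
		 */
		if (!q->ring.queue)
			continue;

		/* Safe: this ring was fully initialized. */
		skb_array_cleanup(q);
	}
}

With a guard like this, pfifo_fast_init() can keep returning -ENOMEM on the
first failed band and let the caller's ops->destroy path handle the unwind
without touching uninitialized state.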
