On Fri, 28 Jun 2019 14:39:29 -0700, Shannon Nelson wrote:
> +static int ionic_tx(struct queue *q, struct sk_buff *skb)
> +{
> +     struct tx_stats *stats = q_to_tx_stats(q);
> +     int err;
> +
> +     if (skb->ip_summed == CHECKSUM_PARTIAL)
> +             err = ionic_tx_calc_csum(q, skb);
> +     else
> +             err = ionic_tx_calc_no_csum(q, skb);
> +     if (err)
> +             return err;
> +
> +     err = ionic_tx_skb_frags(q, skb);
> +     if (err)
> +             return err;
> +
> +     skb_tx_timestamp(skb);
> +     stats->pkts++;
> +     stats->bytes += skb->len;

Presumably these counters are 64-bit, so you should wrap the updates in
u64_stats_update_begin()/u64_stats_update_end() (and do the same for
all the other stats).
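
Something along these lines, as a rough sketch (assuming a
struct u64_stats_sync member, here called syncp, gets added to
tx_stats and initialized with u64_stats_init() at queue setup):

	u64_stats_update_begin(&stats->syncp);
	stats->pkts++;
	stats->bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

The read side (e.g. your .ndo_get_stats64 handler) then pairs this
with a u64_stats_fetch_begin()/u64_stats_fetch_retry() loop.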

> +
> +     ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
> +
> +     return 0;
> +}
> +
> +static int ionic_tx_descs_needed(struct queue *q, struct sk_buff *skb)
> +{
> +     struct tx_stats *stats = q_to_tx_stats(q);
> +     int err;
> +
> +     /* If TSO, need roundup(skb->len/mss) descs */
> +     if (skb_is_gso(skb))
> +             return (skb->len / skb_shinfo(skb)->gso_size) + 1;
> +
> +     /* If non-TSO, just need 1 desc and nr_frags sg elems */
> +     if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
> +             return 1;
> +
> +     /* Too many frags, so linearize */
> +     err = skb_linearize(skb);
> +     if (err)
> +             return err;
> +
> +     stats->linearize++;
> +
> +     /* Need 1 desc and zero sg elems */
> +     return 1;
> +}
> +
> +netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
> +{
> +     u16 queue_index = skb_get_queue_mapping(skb);
> +     struct lif *lif = netdev_priv(netdev);
> +     struct queue *q;
> +     int ndescs;
> +     int err;
> +
> +     if (unlikely(!test_bit(LIF_UP, lif->state))) {
> +             dev_kfree_skb(skb);
> +             return NETDEV_TX_OK;
> +     }
> +
> +     if (likely(lif_to_txqcq(lif, queue_index)))
> +             q = lif_to_txq(lif, queue_index);
> +     else
> +             q = lif_to_txq(lif, 0);
> +
> +     ndescs = ionic_tx_descs_needed(q, skb);
> +     if (ndescs < 0)
> +             goto err_out_drop;
> +
> +     if (!ionic_q_has_space(q, ndescs)) {
> +             netif_stop_subqueue(netdev, queue_index);
> +             q->stop++;
> +
> +             /* Might race with ionic_tx_clean, check again */
> +             smp_rmb();
> +             if (ionic_q_has_space(q, ndescs)) {
> +                     netif_wake_subqueue(netdev, queue_index);
> +                     q->wake++;
> +             } else {
> +                     return NETDEV_TX_BUSY;

This should never really happen. If you stop the ring as soon as it
may no longer fit a worst-case frame (see below), the stack won't call
your xmit routine without space, so NETDEV_TX_BUSY is best avoided.

> +             }
> +     }
> +
> +     if (skb_is_gso(skb))
> +             err = ionic_tx_tso(q, skb);
> +     else
> +             err = ionic_tx(q, skb);
> +
> +     if (err)
> +             goto err_out_drop;

.. at this point, if you can't guarantee that the biggest possible
frame will still fit, you have to stop the ring.
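
Roughly like this (a sketch only; MAX_SKB_FRAGS + 1 here is a
stand-in for whatever worst-case descriptor count
ionic_tx_descs_needed() can actually return):

	/* Stop the queue while a worst-case frame may not fit;
	 * ionic_tx_clean() wakes it back up once enough descriptors
	 * have been freed.
	 */
	if (!ionic_q_has_space(q, MAX_SKB_FRAGS + 1)) {
		netif_stop_subqueue(netdev, queue_index);
		q->stop++;
	}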

> +     return NETDEV_TX_OK;
> +
> +err_out_drop:
> +     q->drop++;
> +     dev_kfree_skb(skb);
> +     return NETDEV_TX_OK;
> +}
