Hi, > -----Original Message----- > From: Stephen Hemminger <step...@networkplumber.org> > Sent: Monday, June 30, 2025 8:42 PM > To: Gagandeep Singh <g.si...@nxp.com> > Cc: dev@dpdk.org; Hemant Agrawal <hemant.agra...@nxp.com>; Sachin > Saxena <sachin.sax...@nxp.com> > Subject: Re: [PATCH v6 6/6] net/dpaa2: enable software taildrop for ordered > queues > > On Mon, 30 Jun 2025 15:28:00 +0530 > Gagandeep Singh <g.si...@nxp.com> wrote: > > > This patch adds support for software taildrop on ordered queues in the > > DPAA2 driver. > > It also enables congestion notification by default on traffic > > management (TM) queues, which is a prerequisite for software taildrop > > functionality. > > > > Signed-off-by: Gagandeep Singh <g.si...@nxp.com> > > --- > > doc/guides/rel_notes/release_25_07.rst | 1 + > > drivers/net/dpaa2/dpaa2_rxtx.c | 24 ++++++++++- > > drivers/net/dpaa2/dpaa2_tm.c | 60 +++++++++++++------------- > > 3 files changed, 53 insertions(+), 32 deletions(-) > > > > diff --git a/doc/guides/rel_notes/release_25_07.rst > > b/doc/guides/rel_notes/release_25_07.rst > > index 53a1c8756d..dd8bc74946 100644 > > --- a/doc/guides/rel_notes/release_25_07.rst > > +++ b/doc/guides/rel_notes/release_25_07.rst > > @@ -163,6 +163,7 @@ New Features > > * **Updated DPAA2 ethernet driver.** > > > > * Added DPBP APIs for setting mempool depletion thresholds. > > + * Enabled software taildrop for ordered queues. 
> > > > Removed Items > > ------------- > > diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c > > b/drivers/net/dpaa2/dpaa2_rxtx.c index 67d065bb7c..dce1da80bb > 100644 > > --- a/drivers/net/dpaa2/dpaa2_rxtx.c > > +++ b/drivers/net/dpaa2/dpaa2_rxtx.c > > @@ -1800,8 +1800,11 @@ dpaa2_dev_tx_ordered(void *queue, struct > rte_mbuf **bufs, uint16_t nb_pkts) > > while (qbman_result_SCN_state(dpaa2_q->cscn)) { > > retry_count++; > > /* Retry for some time before giving up */ > > - if (retry_count > CONG_RETRY_COUNT) > > + if (retry_count > CONG_RETRY_COUNT) { > > + if (dpaa2_q->tm_sw_td) > > + goto sw_td; > > goto skip_tx; > > + } > > } > > > > frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? > > @@ -1961,6 +1964,25 @@ dpaa2_dev_tx_ordered(void *queue, struct > rte_mbuf **bufs, uint16_t nb_pkts) > > rte_pktmbuf_free_seg(buf_to_free[loop].seg); > > } > > > > + return num_tx; > > +sw_td: > > + loop = 0; > > + while (loop < num_tx) { > > + if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) > > + rte_pktmbuf_free(*bufs); > > + bufs++; > > + loop++; > > + } > > This looks like a possible bug. > Is there a double free here if there are external buffers. No, there is no double free: this while loop scans the buffers that have already been transmitted and frees only the external buffers. All non-external buffers will be freed by the HW itself. However, the function is already maintaining the "buf_to_free" list, so I will update this code to free from "buf_to_free" instead, to avoid any read from an already freed buffer. Thanks.
> > + > > + /* free the pending buffers */ > > + while (nb_pkts) { > > + rte_pktmbuf_free(*bufs); > > + bufs++; > > + nb_pkts--; > > + num_tx++; > > + } > > Why not use rte_pktmbuf_free_bulk here? You are right, I will change it to rte_pktmbuf_free_bulk. > > > + dpaa2_q->tx_pkts += num_tx; > > + > > return num_tx; > > } > > > > diff --git a/drivers/net/dpaa2/dpaa2_tm.c > > b/drivers/net/dpaa2/dpaa2_tm.c index dbf66c756e..36e815c356 100644 > > --- a/drivers/net/dpaa2/dpaa2_tm.c > > +++ b/drivers/net/dpaa2/dpaa2_tm.c > > @@ -611,41 +611,39 @@ dpaa2_tm_configure_queue(struct rte_eth_dev > *dev, struct dpaa2_tm_node *node) > > } > > dpaa2_q->fqid = qid.fqid; > > > > - /* setting congestion notification */ > > - if (!(priv->flags & DPAA2_TX_CGR_OFF)) { > > - struct dpni_congestion_notification_cfg cong_notif_cfg = {0}; > > - > > - cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; > > - cong_notif_cfg.threshold_entry = dpaa2_q->nb_desc; > > - /* Notify that the queue is not congested when the data in > > - * the queue is below this thershold.(90% of value) > > - */ > > - cong_notif_cfg.threshold_exit = (dpaa2_q->nb_desc * 9) / > 10; > > - cong_notif_cfg.message_ctx = 0; > > - iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(dpaa2_q->cscn, > > - sizeof(struct qbman_result)); > > - if (iova == RTE_BAD_IOVA) { > > - DPAA2_PMD_ERR("No IOMMU map for cscn(%p)", > dpaa2_q->cscn); > > - return -ENOBUFS; > > - } > > - cong_notif_cfg.message_iova = iova; > > - cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; > > - cong_notif_cfg.notification_mode = > > + /* setting taildrop through congestion notification */ > > + struct dpni_congestion_notification_cfg cong_notif_cfg = {0}; > > + > > + cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; > > + cong_notif_cfg.threshold_entry = dpaa2_q->nb_desc; > > + /* Notify that the queue is not congested when the data in > > + * the queue is below this thershold.(90% of value) > > + */ > > + cong_notif_cfg.threshold_exit = (dpaa2_q->nb_desc * 9) / 10; 
> > + cong_notif_cfg.message_ctx = 0; > > + > > + iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(dpaa2_q->cscn, > > + sizeof(struct qbman_result)); > > + if (iova == RTE_BAD_IOVA) { > > + DPAA2_PMD_ERR("No IOMMU map for cscn(%p)", dpaa2_q- > >cscn); > > + return -ENOBUFS; > > + } > > + cong_notif_cfg.message_iova = iova; > > + cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; > > + cong_notif_cfg.notification_mode = > > > DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | > > > DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | > > > DPNI_CONG_OPT_COHERENT_WRITE; > > - cong_notif_cfg.cg_point = DPNI_CP_QUEUE; > > + cong_notif_cfg.cg_point = DPNI_CP_QUEUE; > > > > - ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, > > - priv->token, > > - DPNI_QUEUE_TX, > > - ((node->parent->channel_id << 8) | > tc_id), > > - &cong_notif_cfg); > > - if (ret) { > > - DPAA2_PMD_ERR("Error in setting tx congestion > notification: " > > - "err=%d", ret); > > - return -ret; > > - } > > + ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, > > + priv->token, DPNI_QUEUE_TX, > > + ((node->parent->channel_id << 8) | tc_id), > > + &cong_notif_cfg); > > + if (ret) { > > + DPAA2_PMD_ERR("Error in setting tx congestion notification: > " > > + "err=%d", ret); > > + return -ret; > > } > > dpaa2_q->tm_sw_td = true; > >