Add rte_pmd_mlx5_pp_rate_table_query() to report the HW packet
pacing rate table size and how many entries are currently in use.

The total comes from the HCA QoS capability
packet_pacing_rate_table_size. The used count is derived by
collecting unique non-zero PP indices across all TX queues.

Signed-off-by: Vincent Jardin <[email protected]>
---
 drivers/net/mlx5/mlx5_tx.c      | 51 +++++++++++++++++++++++++++++++++
 drivers/net/mlx5/rte_pmd_mlx5.h | 27 +++++++++++++++++
 2 files changed, 78 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
index 7051390a5e..4cd0e1ce60 100644
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -886,3 +886,54 @@ int rte_pmd_mlx5_txq_rate_limit_query(uint16_t port_id, uint16_t queue_id,
                                      packet_pacing_rate_limit_index);
         return 0;
  }
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_pmd_mlx5_pp_rate_table_query, 26.07)
+int rte_pmd_mlx5_pp_rate_table_query(uint16_t port_id,
+				     struct rte_pmd_mlx5_pp_rate_table_info *info)
+{
+	struct rte_eth_dev *dev;
+	struct mlx5_priv *priv;
+	uint16_t seen[RTE_MAX_QUEUES_PER_PORT];
+	uint16_t used = 0;
+	unsigned int i;
+
+	/* Set rte_errno on every failure path, per DPDK convention. */
+	if (info == NULL) {
+		rte_errno = EINVAL;
+		return -EINVAL;
+	}
+	if (!rte_eth_dev_is_valid_port(port_id)) {
+		rte_errno = ENODEV;
+		return -ENODEV;
+	}
+	dev = &rte_eth_devices[port_id];
+	priv = dev->data->dev_private;
+	if (!priv->sh->cdev->config.hca_attr.qos.packet_pacing) {
+		rte_errno = ENOTSUP;
+		return -ENOTSUP;
+	}
+	info->total = priv->sh->cdev->config.hca_attr.qos
+			.packet_pacing_rate_table_size;
+	info->used = 0;
+	if (priv->txqs == NULL)
+		return 0;
+	/* Count unique non-zero PP indices across all TX queues. */
+	for (i = 0; i < priv->txqs_n; i++) {
+		struct mlx5_txq_data *txq_data = (*priv->txqs)[i];
+		struct mlx5_txq_ctrl *txq_ctrl;
+		uint16_t pp_id;
+		uint16_t j;
+
+		if (txq_data == NULL)
+			continue;
+		txq_ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
+		pp_id = txq_ctrl->rl.pp_id;
+		if (pp_id == 0)
+			continue;
+		/* Linear scan: queue count is small and bounded. */
+		for (j = 0; j < used; j++)
+			if (seen[j] == pp_id)
+				break;
+		if (j == used && used < RTE_DIM(seen))
+			seen[used++] = pp_id;
+	}
+	info->used = used;
+	return 0;
+}
diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
index 698d7d2032..4033b9acc7 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5.h
+++ b/drivers/net/mlx5/rte_pmd_mlx5.h
@@ -450,6 +450,33 @@ int
 rte_pmd_mlx5_txq_rate_limit_query(uint16_t port_id, uint16_t queue_id,
                                  struct rte_pmd_mlx5_txq_rate_limit_info 
*info);
 
+/**
+ * Packet pacing rate table capacity information.
+ */
+struct rte_pmd_mlx5_pp_rate_table_info {
+       uint16_t total;         /**< Total HW rate table entries. */
+       uint16_t used;          /**< Currently allocated entries. */
+};
+
+/**
+ * Query packet pacing rate table capacity.
+ *
+ * @param[in] port_id
+ *   Port ID.
+ * @param[out] info
+ *   Rate table capacity information.
+ *
+ * @return
+ *   0 on success, negative errno on failure:
+ *   - -ENODEV: invalid port_id.
+ *   - -EINVAL: info is NULL.
+ *   - -ENOTSUP: packet pacing not supported.
+ */
+__rte_experimental
+int
+rte_pmd_mlx5_pp_rate_table_query(uint16_t port_id,
+                                struct rte_pmd_mlx5_pp_rate_table_info *info);
+
 /** Type of mlx5 driver event for which custom callback is called. */
 enum rte_pmd_mlx5_driver_event_cb_type {
        /** Called after HW Rx queue is created. */
-- 
2.43.0

Reply via email to