In the new consecutive memory mode of Tx queue allocation, the needed
WQ length is calculated via txq_calc_wqebb_cnt(). This function is
used in both Verbs and DevX modes, but during DevX queue creation the
actual length is re-calculated and adjusted for the DevX API. The two
modes base the calculation on different queue parameters,
'max_inline_data' for Verbs and 'inlen_send' for DevX, so the
resulting WQEBB counts differ.
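For reference, the two per-WQE size estimates look roughly as follows
(simplified from the code below, TSO header handling omitted):

    /* Verbs mode: sized from max_inline_data. */
    wqe_size = MLX5_WQE_CSEG_SIZE + MLX5_WQE_ESEG_SIZE +
               MLX5_WSEG_SIZE - MLX5_ESEG_MIN_INLINE_SIZE +
               txq_ctrl->max_inline_data;
    wqe_size = RTE_MAX(wqe_size, MLX5_WQE_SIZE);

    /* DevX mode: sized from inlen_send, as the DevX API expects. */
    wqe_size = MLX5_WQE_CSEG_SIZE + MLX5_WQE_ESEG_SIZE + MLX5_WQE_DSEG_SIZE;
    if (txq_ctrl->txq.inlen_send)
            wqe_size = RTE_MAX(wqe_size,
                               sizeof(struct mlx5_wqe_cseg) +
                               sizeof(struct mlx5_wqe_eseg) +
                               RTE_ALIGN(txq_ctrl->txq.inlen_send +
                                         sizeof(uint32_t), MLX5_WSEG_SIZE));
    wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);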

Pass the mode as a bool input to txq_calc_wqebb_cnt() and adjust the
calculation accordingly to solve the mismatch. In addition, since the
memory and the MR are already allocated successfully before the
SQ / CQ is created, a remaining mismatch is not critical: a warning
message is enough and the saved length can be used to create the
queue.
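A minimal sketch of the fallback on the SQ side (names as in
mlx5_devx_sq_create() below; the saved length attr->q_len is a power
of two):

    if (umem_size != attr->q_len) {
            DRV_LOG(WARNING,
                    "Mismatch between saved length and calc length of WQ %u-%u, using saved length.",
                    umem_size, attr->q_len);
            /* The saved length is a power of two, derive the log size from it. */
            act_log_size = (uint16_t)rte_log2_u32(attr->q_len / MLX5_WQE_SIZE);
    }
    ...
    attr->wq_attr.log_wq_sz = act_log_size;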

Fixes: d94177339289 ("net/mlx5: use consecutive memory for Tx queue creation")

Signed-off-by: Bing Zhao <bi...@nvidia.com>
---
 drivers/common/mlx5/mlx5_common_devx.c | 23 +++++++++++-------
 drivers/net/mlx5/mlx5_txq.c            | 33 +++++++++++++++++++-------
 2 files changed, 40 insertions(+), 16 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_common_devx.c b/drivers/common/mlx5/mlx5_common_devx.c
index e237558ec2..18a53769c9 100644
--- a/drivers/common/mlx5/mlx5_common_devx.c
+++ b/drivers/common/mlx5/mlx5_common_devx.c
@@ -96,6 +96,7 @@ mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
        uint32_t num_of_cqes = RTE_BIT32(log_desc_n);
        int ret;
        uint32_t umem_offset, umem_id;
+       uint16_t act_log_size = log_desc_n;
 
        if (page_size == (size_t)-1 || alignment == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get page_size.");
@@ -133,15 +134,18 @@ mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
                umem_id = mlx5_os_get_umem_id(umem_obj);
        } else {
                if (umem_size != attr->q_len) {
-                       DRV_LOG(ERR, "Mismatch between saved length and calc length of CQ %u-%u",
+                       DRV_LOG(WARNING, "Mismatch between saved length and calc length"
+                               " of CQ %u-%u, using saved length.",
                                umem_size, attr->q_len);
-                       rte_errno = EINVAL;
-                       return -rte_errno;
+                       /* saved length is a power of 2. */
+                       act_log_size =
+                               (uint16_t)rte_log2_u32(attr->q_len / sizeof(struct mlx5_cqe));
                }
                umem_buf = attr->umem;
                umem_offset = attr->q_off;
                umem_dbrec = attr->db_off;
                umem_id = mlx5_os_get_umem_id(attr->umem_obj);
+
        }
        /* Fill attributes for CQ object creation. */
        attr->q_umem_valid = 1;
@@ -151,7 +155,7 @@ mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
        attr->db_umem_id = umem_id;
        attr->db_umem_offset = umem_dbrec;
        attr->eqn = eqn;
-       attr->log_cq_size = log_desc_n;
+       attr->log_cq_size = act_log_size;
        attr->log_page_size = rte_log2_u32(page_size);
        /* Create completion queue object with DevX. */
        cq = mlx5_devx_cmd_create_cq(ctx, attr);
@@ -251,6 +255,7 @@ mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
        uint32_t num_of_wqbbs = RTE_BIT32(log_wqbb_n);
        int ret;
        uint32_t umem_offset, umem_id;
+       uint16_t act_log_size = log_wqbb_n;
 
        if (alignment == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get WQE buf alignment.");
@@ -281,15 +286,17 @@ mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
                umem_id = mlx5_os_get_umem_id(umem_obj);
        } else {
                if (umem_size != attr->q_len) {
-                       DRV_LOG(ERR, "Mismatch between saved length and calc length of WQ %u-%u",
+                       DRV_LOG(WARNING, "Mismatch between saved length and calc length"
+                               " of WQ %u-%u, using saved length.",
                                umem_size, attr->q_len);
-                       rte_errno = EINVAL;
-                       return -rte_errno;
+                       /* saved length is a power of 2. */
+                       act_log_size = (uint16_t)rte_log2_u32(attr->q_len / MLX5_WQE_SIZE);
                }
                umem_buf = attr->umem;
                umem_offset = attr->q_off;
                umem_dbrec = attr->db_off;
                umem_id = mlx5_os_get_umem_id(attr->umem_obj);
+
        }
        /* Fill attributes for SQ object creation. */
        attr->wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
@@ -300,7 +307,7 @@ mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
        attr->wq_attr.dbr_umem_id = umem_id;
        attr->wq_attr.dbr_addr = umem_dbrec;
        attr->wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
-       attr->wq_attr.log_wq_sz = log_wqbb_n;
+       attr->wq_attr.log_wq_sz = act_log_size;
        attr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
        /* Create send queue object with DevX. */
        sq = mlx5_devx_cmd_create_sq(ctx, attr);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 0981091acd..b090d8274d 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -695,22 +695,39 @@ mlx5_txq_obj_verify(struct rte_eth_dev *dev)
  *
  * @param txq_ctrl
  *   Pointer to Tx queue control structure.
+ * @param devx
+ *   True if the calculation is for a DevX queue.
  *
  * @return
  *   The number of WQEBB.
  */
 static int
-txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
+txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl, bool devx)
 {
        unsigned int wqe_size;
        const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
 
-       wqe_size = MLX5_WQE_CSEG_SIZE +
-                  MLX5_WQE_ESEG_SIZE +
-                  MLX5_WSEG_SIZE -
-                  MLX5_ESEG_MIN_INLINE_SIZE +
-                  txq_ctrl->max_inline_data;
-       wqe_size = RTE_MAX(wqe_size, MLX5_WQE_SIZE);
+       if (devx) {
+               wqe_size = txq_ctrl->txq.tso_en ?
+                          RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
+               wqe_size += MLX5_WQE_CSEG_SIZE +
+                           MLX5_WQE_ESEG_SIZE +
+                           MLX5_WQE_DSEG_SIZE;
+               if (txq_ctrl->txq.inlen_send)
+                       wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
+                                                    sizeof(struct mlx5_wqe_eseg) +
+                                                    RTE_ALIGN(txq_ctrl->txq.inlen_send +
+                                                              sizeof(uint32_t),
+                                                              MLX5_WSEG_SIZE));
+               wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE);
+       } else {
+               wqe_size = MLX5_WQE_CSEG_SIZE +
+                          MLX5_WQE_ESEG_SIZE +
+                          MLX5_WSEG_SIZE -
+                          MLX5_ESEG_MIN_INLINE_SIZE +
+                          txq_ctrl->max_inline_data;
+               wqe_size = RTE_MAX(wqe_size, MLX5_WQE_SIZE);
+       }
        return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
 }
 
@@ -1154,7 +1171,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        tmpl->txq.idx = idx;
        txq_set_params(tmpl);
        txq_adjust_params(tmpl);
-       wqebb_cnt = txq_calc_wqebb_cnt(tmpl);
+       wqebb_cnt = txq_calc_wqebb_cnt(tmpl, !!mlx5_devx_obj_ops_en(priv->sh));
        max_wqe = mlx5_dev_get_max_wq_size(priv->sh);
        if (wqebb_cnt > max_wqe) {
                DRV_LOG(ERR,
-- 
2.34.1
