Handle the rx_deferred_start and tx_deferred_start flags during the configuration done in rx_queue_setup and tx_queue_setup, so that queues marked as deferred are not started by dev_start.
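
This follows the standard ethdev deferred-start contract: a queue set up with the flag stays disabled across dev_start and is brought up later with rte_eth_dev_rx_queue_start() / rte_eth_dev_tx_queue_start(). A minimal application-side sketch (illustrative only, not part of this patch; the port id, queue id, descriptor count and mempool are placeholders, and the port is assumed to be already configured):

    #include <rte_ethdev.h>

    /* Sketch: set up one Rx queue with deferred start, start the port,
     * then enable the queue explicitly once the application is ready.
     */
    static int rx_deferred_start_example(uint16_t port_id, uint16_t queue_id,
            struct rte_mempool *mb_pool)
    {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rxconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
            return ret;

        /* Mark the queue deferred: dev_start must leave it disabled. */
        rxconf = dev_info.default_rxconf;
        rxconf.rx_deferred_start = 1;

        ret = rte_eth_rx_queue_setup(port_id, queue_id, 512,
                rte_eth_dev_socket_id(port_id), &rxconf, mb_pool);
        if (ret != 0)
            return ret;

        ret = rte_eth_dev_start(port_id); /* deferred queue stays stopped */
        if (ret != 0)
            return ret;

        /* Later, when traffic should flow on this queue: */
        return rte_eth_dev_rx_queue_start(port_id, queue_id);
    }

Internally the patch maps this to the DBS availability monitor: deferred queues are programmed with RX_AM_DISABLE/TX_AM_DISABLE at setup time and skipped by eth_dev_start().
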
Signed-off-by: Oleksandr Kolomeiets <okl-...@napatech.com>
---
 drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c | 46 +++++++++++--------
 drivers/net/ntnic/include/ntos_drv.h          |  3 +-
 drivers/net/ntnic/ntnic_ethdev.c              | 22 +++++----
 drivers/net/ntnic/ntnic_mod_reg.h             | 12 +++--
 4 files changed, 52 insertions(+), 31 deletions(-)

diff --git a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
index 0b049a8559..107fe91394 100644
--- a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
+++ b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
@@ -369,7 +369,8 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	uint32_t host_id,
 	uint32_t header,
 	uint32_t vq_type,
-	int irq_vector)
+	int irq_vector,
+	uint8_t rx_deferred_start)
 {
 	uint32_t qs = dbs_qsize_log2(queue_size);
 	uint32_t int_enable;
@@ -430,7 +431,8 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
 	 *    good idea to initialize all DBS_RX_QUEUES entries.
 	 */
-	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr, RX_AM_ENABLE,
+	uint32_t enable = rx_deferred_start ? RX_AM_DISABLE : RX_AM_ENABLE;
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr, enable,
 			host_id, 0, irq_vector >= 0 ? 1 : 0) != 0) {
 		return NULL;
 	}
@@ -698,7 +700,8 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	uint32_t header,
 	uint32_t vq_type,
 	int irq_vector,
-	uint32_t in_order)
+	uint32_t in_order,
+	uint8_t tx_deferred_start)
 {
 	uint32_t int_enable;
 	uint32_t vec;
@@ -760,8 +763,9 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	 * kernel).
 	 */
 	if (irq_vector < 0) {
+		uint32_t enable = tx_deferred_start ? TX_AM_DISABLE : TX_AM_ENABLE;
 		if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
-				TX_AM_ENABLE, host_id, 0, 0) != 0) {
+				enable, host_id, 0, 0) != 0) {
 			return NULL;
 		}
 	}
@@ -794,7 +798,8 @@ nthw_setup_mngd_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
 	uint32_t header,
 	struct nthw_memory_descriptor *p_virt_struct_area,
 	struct nthw_memory_descriptor *p_packet_buffers,
-	int irq_vector)
+	int irq_vector,
+	uint8_t rx_deferred_start)
 {
 	struct virtq_struct_layout_s virtq_struct_layout = dbs_calc_struct_layout(queue_size);
 
@@ -831,7 +836,7 @@ nthw_setup_mngd_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
 			virtq_struct_layout.used_offset,
 			(char *)p_virt_struct_area->phys_addr +
 			virtq_struct_layout.desc_offset,
-			(uint16_t)queue_size, host_id, header, SPLIT_RING, irq_vector);
+			(uint16_t)queue_size, host_id, header, SPLIT_RING, irq_vector, rx_deferred_start);
 
 	rxvq[index].usage = NTHW_VIRTQ_MANAGED;
 
@@ -849,7 +854,8 @@ nthw_setup_mngd_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
 	int irq_vector,
 	uint32_t in_order,
 	struct nthw_memory_descriptor *p_virt_struct_area,
-	struct nthw_memory_descriptor *p_packet_buffers)
+	struct nthw_memory_descriptor *p_packet_buffers,
+	uint8_t tx_deferred_start)
 {
 	struct virtq_struct_layout_s virtq_struct_layout = dbs_calc_struct_layout(queue_size);
 
@@ -889,7 +895,7 @@ nthw_setup_mngd_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
 			(char *)p_virt_struct_area->phys_addr +
 			virtq_struct_layout.desc_offset,
 			(uint16_t)queue_size, host_id, port, virtual_port, header,
-			SPLIT_RING, irq_vector, in_order);
+			SPLIT_RING, irq_vector, in_order, tx_deferred_start);
 
 	txvq[index].usage = NTHW_VIRTQ_MANAGED;
 
@@ -977,7 +983,8 @@ nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
 	uint32_t header,
 	struct nthw_memory_descriptor *p_virt_struct_area,
 	struct nthw_memory_descriptor *p_packet_buffers,
-	int irq_vector)
+	int irq_vector,
+	uint8_t rx_deferred_start)
 {
 	struct pvirtq_struct_layout_s pvirtq_layout;
 	struct nthw_virt_queue *vq = &rxvq[index];
@@ -996,7 +1003,7 @@ nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
 			(void *)((uintptr_t)p_virt_struct_area->phys_addr +
 				pvirtq_layout.device_event_offset),
 			p_virt_struct_area->phys_addr, (uint16_t)queue_size, host_id,
-			header, PACKED_RING, irq_vector);
+			header, PACKED_RING, irq_vector, rx_deferred_start);
 
 	vq->usage = NTHW_VIRTQ_MANAGED;
 	return vq;
@@ -1013,7 +1020,8 @@ nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
 	int irq_vector,
 	uint32_t in_order,
 	struct nthw_memory_descriptor *p_virt_struct_area,
-	struct nthw_memory_descriptor *p_packet_buffers)
+	struct nthw_memory_descriptor *p_packet_buffers,
+	uint8_t tx_deferred_start)
 {
 	struct pvirtq_struct_layout_s pvirtq_layout;
 	struct nthw_virt_queue *vq = &txvq[index];
@@ -1030,7 +1038,7 @@ nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
 			(void *)((uintptr_t)p_virt_struct_area->phys_addr +
 				pvirtq_layout.device_event_offset),
 			p_virt_struct_area->phys_addr, (uint16_t)queue_size, host_id,
-			port, virtual_port, header, PACKED_RING, irq_vector, in_order);
+			port, virtual_port, header, PACKED_RING, irq_vector, in_order, tx_deferred_start);
 
 	vq->usage = NTHW_VIRTQ_MANAGED;
 	return vq;
@@ -1052,18 +1060,19 @@ nthw_setup_mngd_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	struct nthw_memory_descriptor *p_virt_struct_area,
 	struct nthw_memory_descriptor *p_packet_buffers,
 	uint32_t vq_type,
-	int irq_vector)
+	int irq_vector,
+	uint8_t rx_deferred_start)
 {
 	switch (vq_type) {
 	case SPLIT_RING:
 		return nthw_setup_mngd_rx_virt_queue_split(p_nthw_dbs, index, queue_size,
 				host_id, header, p_virt_struct_area,
-				p_packet_buffers, irq_vector);
+				p_packet_buffers, irq_vector, rx_deferred_start);
 
 	case PACKED_RING:
 		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs, index, queue_size,
 				host_id, header, p_virt_struct_area,
-				p_packet_buffers, irq_vector);
+				p_packet_buffers, irq_vector, rx_deferred_start);
 
 	default:
 		break;
@@ -1091,7 +1100,8 @@ nthw_setup_mngd_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	struct nthw_memory_descriptor *p_packet_buffers,
 	uint32_t vq_type,
 	int irq_vector,
-	uint32_t in_order)
+	uint32_t in_order,
+	uint8_t tx_deferred_start)
 {
 	switch (vq_type) {
 	case SPLIT_RING:
@@ -1099,14 +1109,14 @@ nthw_setup_mngd_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 				host_id, port, virtual_port, header,
 				irq_vector, in_order,
 				p_virt_struct_area,
-				p_packet_buffers);
+				p_packet_buffers, tx_deferred_start);
 
 	case PACKED_RING:
 		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index, queue_size,
 				host_id, port, virtual_port, header,
 				irq_vector, in_order,
 				p_virt_struct_area,
-				p_packet_buffers);
+				p_packet_buffers, tx_deferred_start);
 
 	default:
 		break;
diff --git a/drivers/net/ntnic/include/ntos_drv.h b/drivers/net/ntnic/include/ntos_drv.h
index f6ce442d17..cef3c5c277 100644
--- a/drivers/net/ntnic/include/ntos_drv.h
+++ b/drivers/net/ntnic/include/ntos_drv.h
@@ -69,7 +69,7 @@ struct __rte_cache_aligned ntnic_rx_queue {
 	nt_meta_port_type_t type;
 	uint32_t port;	/* Rx port for this queue */
 	enum fpga_info_profile profile;	/* Inline / Capture */
-
+	uint8_t rx_deferred_start;
 };
 
 struct __rte_cache_aligned ntnic_tx_queue {
@@ -89,6 +89,7 @@ struct __rte_cache_aligned ntnic_tx_queue {
 	unsigned long err_pkts;	/* Tx error packet stat */
 	int enabled;	/* Enabling/disabling of this queue */
 	enum fpga_info_profile profile;	/* Inline / Capture */
+	uint8_t tx_deferred_start;
 };
 
 struct nt_mtr_profile {
diff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c
index d961edb903..d875e7c236 100644
--- a/drivers/net/ntnic/ntnic_ethdev.c
+++ b/drivers/net/ntnic/ntnic_ethdev.c
@@ -995,7 +995,7 @@ static int eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev,
 	uint16_t rx_queue_id,
 	uint16_t nb_rx_desc __rte_unused,
 	unsigned int socket_id __rte_unused,
-	const struct rte_eth_rxconf *rx_conf __rte_unused,
+	const struct rte_eth_rxconf *rx_conf,
 	struct rte_mempool *mb_pool)
 {
 	NT_LOG_DBGX(DBG, NTNIC, "Rx queue setup");
@@ -1029,7 +1029,8 @@ static int eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev,
 	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
 	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
 			RTE_PKTMBUF_HEADROOM);
-	rx_q->enabled = 1;
+	rx_q->enabled = !rx_conf->rx_deferred_start;
+	rx_q->rx_deferred_start = rx_conf->rx_deferred_start;
 
 	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
 			SG_NB_HW_RX_DESCRIPTORS, SG_HW_RX_PKT_BUFFER_SIZE) < 0)
@@ -1048,7 +1049,8 @@ static int eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev,
 			&rx_q->hwq.virt_queues_ctrl,
 			rx_q->hwq.pkt_buffers,
 			SPLIT_RING,
-			-1);
+			-1,
+			rx_conf->rx_deferred_start);
 
 	NT_LOG(DBG, NTNIC, "(%" PRIu32 ") NTNIC RX OVS-SW queues successfully setup",
 		internals->port);
@@ -1060,7 +1062,7 @@ static int eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev,
 	uint16_t tx_queue_id,
 	uint16_t nb_tx_desc __rte_unused,
 	unsigned int socket_id __rte_unused,
-	const struct rte_eth_txconf *tx_conf __rte_unused)
+	const struct rte_eth_txconf *tx_conf)
 {
 	const struct port_ops *port_ops = get_port_ops();
 
@@ -1141,9 +1143,11 @@ static int eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev,
 			tx_q->hwq.pkt_buffers,
 			SPLIT_RING,
 			-1,
-			IN_ORDER);
+			IN_ORDER,
+			tx_conf->tx_deferred_start);
 
-	tx_q->enabled = 1;
+	tx_q->enabled = !tx_conf->tx_deferred_start;
+	tx_q->tx_deferred_start = tx_conf->tx_deferred_start;
 
 	NT_LOG(DBG, NTNIC, "(%" PRIu32 ") NTNIC TX OVS-SW queues successfully setup",
 		internals->port);
@@ -1365,10 +1369,12 @@ eth_dev_start(struct rte_eth_dev *eth_dev)
 		uint q;
 
 		for (q = 0; q < internals->nb_rx_queues; q++)
-			eth_rx_queue_start(eth_dev, q);
+			if (!internals->rxq_scg[q].rx_deferred_start)
+				eth_rx_queue_start(eth_dev, q);
 
 		for (q = 0; q < internals->nb_tx_queues; q++)
-			eth_tx_queue_start(eth_dev, q);
+			if (!internals->txq_scg[q].tx_deferred_start)
+				eth_tx_queue_start(eth_dev, q);
 
 	if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE) {
 		eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
diff --git a/drivers/net/ntnic/ntnic_mod_reg.h b/drivers/net/ntnic/ntnic_mod_reg.h
index 7f5bd5e0ec..26efec1c65 100644
--- a/drivers/net/ntnic/ntnic_mod_reg.h
+++ b/drivers/net/ntnic/ntnic_mod_reg.h
@@ -37,7 +37,8 @@ struct sg_ops_s {
 		uint32_t host_id,
 		uint32_t header,
 		uint32_t vq_type,
-		int irq_vector);
+		int irq_vector,
+		uint8_t rx_deferred_start);
 	struct nthw_virt_queue *(*nthw_setup_tx_virt_queue)(nthw_dbs_t *p_nthw_dbs,
 		uint32_t index,
 		uint16_t start_idx,
@@ -52,7 +53,8 @@ struct sg_ops_s {
 		uint32_t header,
 		uint32_t vq_type,
 		int irq_vector,
-		uint32_t in_order);
+		uint32_t in_order,
+		uint8_t tx_deferred_start);
 	struct nthw_virt_queue *(*nthw_setup_mngd_rx_virt_queue)(nthw_dbs_t *p_nthw_dbs,
 		uint32_t index,
 		uint32_t queue_size,
@@ -70,7 +72,8 @@ struct sg_ops_s {
 		 */
 		struct nthw_memory_descriptor *p_packet_buffers,
 		uint32_t vq_type,
-		int irq_vector);
+		int irq_vector,
+		uint8_t rx_deferred_start);
 	int (*nthw_release_mngd_rx_virt_queue)(struct nthw_virt_queue *rxvq);
 	struct nthw_virt_queue *(*nthw_setup_mngd_tx_virt_queue)(nthw_dbs_t *p_nthw_dbs,
 		uint32_t index,
@@ -92,7 +95,8 @@ struct sg_ops_s {
 		struct nthw_memory_descriptor *p_packet_buffers,
 		uint32_t vq_type,
 		int irq_vector,
-		uint32_t in_order);
+		uint32_t in_order,
+		uint8_t tx_deferred_start);
 	int (*nthw_release_mngd_tx_virt_queue)(struct nthw_virt_queue *txvq);
 	int (*nthw_switch_rx_virt_queue)(nthw_dbs_t *p_nthw_dbs, uint32_t index, uint32_t enable);
 	int (*nthw_switch_tx_virt_queue)(nthw_dbs_t *p_nthw_dbs, uint32_t index, uint32_t enable);
-- 
2.47.1