Testpmd exposes the "--rxq-share=[N]" parameter, which controls sharing of Rx queues. Before this patch, the logic was that either:
- all queues were assigned to the same share group (when N was not passed), - or ports were grouped in subsets of N ports, each subset got different share group index. 2nd option did not work well with dynamic representor probing, where new representors would be assigned to new share group. This patch changes the logic in testpmd to dynamically assign share group index. Each unique switch and Rx domain will get different share group. Signed-off-by: Dariusz Sosnowski <[email protected]> --- app/test-pmd/parameters.c | 14 +---- app/test-pmd/testpmd.c | 84 +++++++++++++++++++++++++-- app/test-pmd/testpmd.h | 2 +- doc/guides/testpmd_app_ug/run_app.rst | 10 ++-- 4 files changed, 89 insertions(+), 21 deletions(-) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index 3617860830..ecbd618f00 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -351,7 +351,7 @@ static const struct option long_options[] = { NO_ARG(TESTPMD_OPT_MULTI_RX_MEMPOOL), NO_ARG(TESTPMD_OPT_TXONLY_MULTI_FLOW), REQUIRED_ARG(TESTPMD_OPT_TXONLY_FLOWS), - OPTIONAL_ARG(TESTPMD_OPT_RXQ_SHARE), + NO_ARG(TESTPMD_OPT_RXQ_SHARE), REQUIRED_ARG(TESTPMD_OPT_ETH_LINK_SPEED), NO_ARG(TESTPMD_OPT_DISABLE_LINK_CHECK), NO_ARG(TESTPMD_OPT_DISABLE_DEVICE_START), @@ -507,7 +507,7 @@ usage(char* progname) printf(" --tx-ip=src,dst: IP addresses in Tx-only mode\n"); printf(" --tx-udp=src[,dst]: UDP ports in Tx-only mode\n"); printf(" --eth-link-speed: force link speed.\n"); - printf(" --rxq-share=X: number of ports per shared Rx queue groups, defaults to UINT32_MAX (1 group)\n"); + printf(" --rxq-share: enable Rx queue sharing per switch and Rx domain\n"); printf(" --disable-link-check: disable check on link status when " "starting/stopping ports.\n"); printf(" --disable-device-start: do not automatically start port\n"); @@ -1579,15 +1579,7 @@ launch_args_parse(int argc, char** argv) rte_exit(EXIT_FAILURE, "txonly-flows must be >= 1 and <= 64\n"); break; case TESTPMD_OPT_RXQ_SHARE_NUM: - 
if (optarg == NULL) { - rxq_share = UINT32_MAX; - } else { - n = atoi(optarg); - if (n >= 0) - rxq_share = (uint32_t)n; - else - rte_exit(EXIT_FAILURE, "rxq-share must be >= 0\n"); - } + rxq_share = 1; break; case TESTPMD_OPT_NO_FLUSH_RX_NUM: no_flush_rx = 1; diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index aad880aa34..a70efbb03f 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -545,9 +545,17 @@ uint8_t record_core_cycles; uint8_t record_burst_stats; /* - * Number of ports per shared Rx queue group, 0 disable. + * Enable Rx queue sharing between ports in the same switch and Rx domain. */ -uint32_t rxq_share; +uint8_t rxq_share; + +struct share_group_slot { + uint16_t domain_id; + uint16_t rx_domain; + uint16_t share_group; +}; + +static struct share_group_slot share_group_slots[RTE_MAX_ETHPORTS]; unsigned int num_sockets = 0; unsigned int socket_ids[RTE_MAX_NUMA_NODES]; @@ -586,6 +594,67 @@ int proc_id; */ unsigned int num_procs = 1; +static int +assign_share_group(struct rte_eth_dev_info *dev_info, uint16_t *share_group) +{ + unsigned int first_free = RTE_DIM(share_group_slots); + unsigned int i; + + for (i = 0; i < RTE_DIM(share_group_slots); i++) { + if (share_group_slots[i].share_group > 0) { + if (dev_info->switch_info.domain_id == share_group_slots[i].domain_id && + dev_info->switch_info.rx_domain == share_group_slots[i].rx_domain) { + *share_group = share_group_slots[i].share_group; + return 0; + } + } else if (first_free == RTE_DIM(share_group_slots)) { + first_free = i; + } + } + + if (first_free == RTE_DIM(share_group_slots)) + return -ENOSPC; + + share_group_slots[first_free].domain_id = dev_info->switch_info.domain_id; + share_group_slots[first_free].rx_domain = dev_info->switch_info.rx_domain; + share_group_slots[first_free].share_group = first_free + 1; + *share_group = share_group_slots[first_free].share_group; + + return 0; +} + +static void +try_release_share_group(struct share_group_slot *slot) +{ + uint16_t 
pi; + bool group_not_used = true; + + /* Check if any port still uses this share group. */ + RTE_ETH_FOREACH_DEV(pi) { + if (ports[pi].dev_info.switch_info.domain_id == slot->domain_id && + ports[pi].dev_info.switch_info.rx_domain == slot->rx_domain) { + group_not_used = false; + break; + } + } + if (group_not_used) { + slot->share_group = 0; + slot->domain_id = 0; + slot->rx_domain = 0; + } +} + +static void +try_release_share_groups(void) +{ + unsigned int i; + + /* Try release each used share group. */ + for (i = 0; i < RTE_DIM(share_group_slots); i++) + if (share_group_slots[i].share_group > 0) + try_release_share_group(&share_group_slots[i]); +} + static void eth_rx_metadata_negotiate_mp(uint16_t port_id) { @@ -3315,6 +3384,7 @@ remove_invalid_ports(void) remove_invalid_ports_in(ports_ids, &nb_ports); remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); nb_cfg_ports = nb_fwd_ports; + try_release_share_groups(); } static void @@ -4097,8 +4167,14 @@ rxtx_port_config(portid_t pid) if (rxq_share > 0 && (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) { /* Non-zero share group to enable RxQ share. */ - port->rxq[qid].conf.share_group = pid / rxq_share + 1; - port->rxq[qid].conf.share_qid = qid; /* Equal mapping. */ + uint16_t share_group; + + if (assign_share_group(&port->dev_info, &share_group) == 0) { + port->rxq[qid].conf.share_group = share_group; + port->rxq[qid].conf.share_qid = qid; /* Equal mapping. 
*/ + } else { + TESTPMD_LOG(INFO, "port %u: failed assigning share group\n", pid); + } } if (offloads != 0) diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index af185540c3..9b60ebd7fc 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -675,7 +675,7 @@ extern enum tx_pkt_split tx_pkt_split; extern uint8_t txonly_multi_flow; extern uint16_t txonly_flows; -extern uint32_t rxq_share; +extern uint8_t rxq_share; extern uint16_t nb_pkt_per_burst; extern uint16_t nb_pkt_flowgen_clones; diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst index ae3ef8cdf8..d0a05d6311 100644 --- a/doc/guides/testpmd_app_ug/run_app.rst +++ b/doc/guides/testpmd_app_ug/run_app.rst @@ -393,13 +393,13 @@ The command line options are: Valid range is 1 to 64. Default is 64. Reducing this value limits the number of unique UDP source ports generated. -* ``--rxq-share=[X]`` +* ``--rxq-share`` Create queues in shared Rx queue mode if device supports. - Shared Rx queues are grouped per X ports. X defaults to UINT32_MAX, - implies all ports join share group 1. Forwarding engine "shared-rxq" - should be used for shared Rx queues. This engine does Rx only and - update stream statistics accordingly. + Testpmd will assign unique share group index per each + unique switch and Rx domain. + Forwarding engine "shared-rxq" should be used for shared Rx queues. + This engine does Rx only and updates stream statistics accordingly. * ``--eth-link-speed`` -- 2.47.3

