Extract a helper function to get the pointer to 'struct nfp_net_hw' for both normal ports and representor ports, so that the common operation functions can be shared by both port types.
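For reference, this is the helper added to nfp_net_common.c below, which resolves the right hw pointer for either port type (the inline comments here are illustrative only and are not part of the diff):

struct nfp_net_hw *
nfp_net_get_hw(const struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) != 0) {
		/* Representor port: the PF hw sits behind the flower app context */
		struct nfp_flower_representor *repr;
		repr = dev->data->dev_private;
		hw = repr->app_fw_flower->pf_hw;
	} else {
		/* Normal port: the private data is the hw itself */
		hw = dev->data->dev_private;
	}

	return hw;
}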
Signed-off-by: Chaoyong He <[email protected]>
Reviewed-by: Peng Zhang <[email protected]>
---
 drivers/net/nfp/flower/nfp_flower.c | 64 ++-----------------------
 drivers/net/nfp/nfp_net_common.c    | 74 ++++++++++++++++-------------
 drivers/net/nfp/nfp_net_common.h    |  1 +
 3 files changed, 47 insertions(+), 92 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 246dd2d454..0727e7fd9f 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -21,62 +21,6 @@
 
 #define CTRL_VNIC_NB_DESC 512
 
-static void
-nfp_pf_repr_enable_queues(struct rte_eth_dev *dev)
-{
-	uint16_t i;
-	struct nfp_hw *hw;
-	uint64_t enabled_queues = 0;
-	struct nfp_flower_representor *repr;
-
-	repr = dev->data->dev_private;
-	hw = &repr->app_fw_flower->pf_hw->super;
-
-	/* Enabling the required TX queues in the device */
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		enabled_queues |= (1 << i);
-
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
-
-	enabled_queues = 0;
-
-	/* Enabling the required RX queues in the device */
-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		enabled_queues |= (1 << i);
-
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
-}
-
-static void
-nfp_pf_repr_disable_queues(struct rte_eth_dev *dev)
-{
-	uint32_t update;
-	uint32_t new_ctrl;
-	struct nfp_hw *hw;
-	struct nfp_net_hw *net_hw;
-	struct nfp_flower_representor *repr;
-
-	repr = dev->data->dev_private;
-	net_hw = repr->app_fw_flower->pf_hw;
-	hw = &net_hw->super;
-
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);
-
-	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
-	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
-			NFP_NET_CFG_UPDATE_MSIX;
-
-	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
-		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
-
-	/* If an error when reconfig we avoid to change hw state */
-	if (nfp_reconfig(hw, new_ctrl, update) < 0)
-		return;
-
-	hw->ctrl = new_ctrl;
-}
-
 int
 nfp_flower_pf_start(struct rte_eth_dev *dev)
 {
@@ -93,10 +37,10 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
 	hw = &net_hw->super;
 
 	/* Disabling queues just in case... */
-	nfp_pf_repr_disable_queues(dev);
+	nfp_net_disable_queues(dev);
 
 	/* Enabling the required queues in the device */
-	nfp_pf_repr_enable_queues(dev);
+	nfp_net_enable_queues(dev);
 
 	new_ctrl = nfp_check_offloads(dev);
 
@@ -157,7 +101,7 @@ nfp_flower_pf_stop(struct rte_eth_dev *dev)
 	repr = dev->data->dev_private;
 	hw = repr->app_fw_flower->pf_hw;
 
-	nfp_pf_repr_disable_queues(dev);
+	nfp_net_disable_queues(dev);
 
 	/* Clear queues */
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -207,7 +151,7 @@ nfp_flower_pf_close(struct rte_eth_dev *dev)
 	 * We assume that the DPDK application is stopping all the
 	 * threads/queues before calling the device close function.
 	 */
-	nfp_pf_repr_disable_queues(dev);
+	nfp_net_disable_queues(dev);
 
 	/* Clear queues */
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
diff --git a/drivers/net/nfp/nfp_net_common.c b/drivers/net/nfp/nfp_net_common.c
index d43a071a42..1be0d7d060 100644
--- a/drivers/net/nfp/nfp_net_common.c
+++ b/drivers/net/nfp/nfp_net_common.c
@@ -233,6 +233,22 @@ nfp_net_mbox_reconfig(struct nfp_net_hw *net_hw,
 	return nn_cfg_readl(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
 }
 
+struct nfp_net_hw *
+nfp_net_get_hw(const struct rte_eth_dev *dev)
+{
+	struct nfp_net_hw *hw;
+
+	if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) != 0) {
+		struct nfp_flower_representor *repr;
+		repr = dev->data->dev_private;
+		hw = repr->app_fw_flower->pf_hw;
+	} else {
+		hw = dev->data->dev_private;
+	}
+
+	return hw;
+}
+
 /*
  * Configure an Ethernet device.
  *
@@ -252,7 +268,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	struct rte_eth_rxmode *rxmode;
 	struct rte_eth_txmode *txmode;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 	dev_conf = &dev->data->dev_conf;
 	rxmode = &dev_conf->rxmode;
 	txmode = &dev_conf->txmode;
@@ -329,7 +345,7 @@ nfp_net_enable_queues(struct rte_eth_dev *dev)
 {
 	struct nfp_net_hw *hw;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 
 	nfp_enable_queues(&hw->super, dev->data->nb_rx_queues,
 			dev->data->nb_tx_queues);
@@ -340,7 +356,7 @@ nfp_net_disable_queues(struct rte_eth_dev *dev)
 {
 	struct nfp_net_hw *net_hw;
 
-	net_hw = dev->data->dev_private;
+	net_hw = nfp_net_get_hw(dev);
 
 	nfp_disable_queues(&net_hw->super);
 }
@@ -367,7 +383,7 @@ nfp_net_set_mac_addr(struct rte_eth_dev *dev,
 	struct nfp_hw *hw;
 	struct nfp_net_hw *net_hw;
 
-	net_hw = dev->data->dev_private;
+	net_hw = nfp_net_get_hw(dev);
 	hw = &net_hw->super;
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
@@ -407,7 +423,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 
 	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO");
@@ -443,7 +459,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
 	struct nfp_net_hw *hw;
 	struct rte_eth_conf *dev_conf;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 	cap = hw->super.cap;
 
 	dev_conf = &dev->data->dev_conf;
@@ -510,14 +526,8 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
 	uint32_t new_ctrl;
 	struct nfp_hw *hw;
 	struct nfp_net_hw *net_hw;
-	struct nfp_flower_representor *repr;
 
-	if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) != 0) {
-		repr = dev->data->dev_private;
-		net_hw = repr->app_fw_flower->pf_hw;
-	} else {
-		net_hw = dev->data->dev_private;
-	}
+	net_hw = nfp_net_get_hw(dev);
 	hw = &net_hw->super;
 
 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
@@ -551,7 +561,7 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
 	struct nfp_hw *hw;
 	struct nfp_net_hw *net_hw;
 
-	net_hw = dev->data->dev_private;
+	net_hw = nfp_net_get_hw(dev);
 	hw = &net_hw->super;
 
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
@@ -588,7 +598,7 @@ nfp_net_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_link link;
 	struct nfp_eth_table *nfp_eth_table;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
@@ -654,7 +664,7 @@ nfp_net_stats_get(struct rte_eth_dev *dev,
 	if (stats == NULL)
 		return -EINVAL;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 
 	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
 
@@ -732,7 +742,7 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
 	uint16_t i;
 	struct nfp_net_hw *hw;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 
 	/* Reading per RX ring stats */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -794,7 +804,7 @@ nfp_net_xstats_size(const struct rte_eth_dev *dev)
 	const uint32_t size = RTE_DIM(nfp_net_xstats);
 
 	/* If the device is a VF, then there will be no MAC stats */
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 	if (hw->mac_stats == NULL) {
 		for (count = 0; count < size; count++) {
 			if (nfp_net_xstats[count].group == NFP_XSTAT_GROUP_MAC)
@@ -828,7 +838,7 @@ nfp_net_xstats_value(const struct rte_eth_dev *dev,
 	struct nfp_net_hw *hw;
 	struct nfp_xstat xstat;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 	xstat = nfp_net_xstats[index];
 
 	if (xstat.group == NFP_XSTAT_GROUP_MAC)
@@ -967,7 +977,7 @@ nfp_net_xstats_reset(struct rte_eth_dev *dev)
 	uint32_t read_size;
 	struct nfp_net_hw *hw;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 	read_size = nfp_net_xstats_size(dev);
 
 	for (id = 0; id < read_size; id++) {
@@ -1015,7 +1025,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	uint16_t max_tx_desc;
 	struct nfp_net_hw *hw;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 
 	nfp_net_rx_desc_limits(hw, &min_rx_desc, &max_rx_desc);
 	nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc);
@@ -1242,7 +1252,7 @@ nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
 	/* Make sure all updates are written before un-masking */
 	rte_wmb();
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id),
 			NFP_NET_CFG_ICR_UNMASKED);
 	return 0;
@@ -1263,7 +1273,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
 	/* Make sure all updates are written before un-masking */
 	rte_wmb();
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id),
 			NFP_NET_CFG_ICR_RXTX);
 	return 0;
@@ -1301,7 +1311,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
 	struct nfp_net_hw *hw;
 	struct rte_pci_device *pci_dev;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
 	/* Make sure all updates are written before un-masking */
@@ -1376,7 +1386,7 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
 {
 	struct nfp_net_hw *hw;
 
-	hw = dev->data->dev_private;
+	hw = nfp_net_get_hw(dev);
 
 	/* MTU setting is forbidden if port is started */
 	if (dev->data->dev_started) {
@@ -1412,7 +1422,7 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
 	struct nfp_net_hw *net_hw;
 	uint32_t rxvlan_ctrl = 0;
 
-	net_hw = dev->data->dev_private;
+	net_hw = nfp_net_get_hw(dev);
 	hw = &net_hw->super;
 	rx_offload = dev->data->dev_conf.rxmode.offloads;
 	new_ctrl = hw->ctrl;
@@ -1462,7 +1472,7 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
 	struct nfp_hw *hw;
 	struct nfp_net_hw *net_hw;
 
-	net_hw = dev->data->dev_private;
+	net_hw = nfp_net_get_hw(dev);
 	hw = &net_hw->super;
 
 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
@@ -1518,7 +1528,7 @@ nfp_net_reta_update(struct rte_eth_dev *dev,
 	struct nfp_hw *hw;
 	struct nfp_net_hw *net_hw;
 
-	net_hw = dev->data->dev_private;
+	net_hw = nfp_net_get_hw(dev);
 	hw = &net_hw->super;
 
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
@@ -1551,7 +1561,7 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
 	struct nfp_hw *hw;
 	struct nfp_net_hw *net_hw;
 
-	net_hw = dev->data->dev_private;
+	net_hw = nfp_net_get_hw(dev);
 	hw = &net_hw->super;
 
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
@@ -1601,7 +1611,7 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
 	struct nfp_net_hw *net_hw;
 	uint32_t cfg_rss_ctrl = 0;
 
-	net_hw = dev->data->dev_private;
+	net_hw = nfp_net_get_hw(dev);
 	hw = &net_hw->super;
 
 	/* Writing the key byte by byte */
@@ -1657,7 +1667,7 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
 	struct nfp_hw *hw;
 	struct nfp_net_hw *net_hw;
 
-	net_hw = dev->data->dev_private;
+	net_hw = nfp_net_get_hw(dev);
 	hw = &net_hw->super;
 	rss_hf = rss_conf->rss_hf;
 
@@ -1698,7 +1708,7 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
 	uint32_t cfg_rss_ctrl;
 	struct nfp_net_hw *net_hw;
 
-	net_hw = dev->data->dev_private;
+	net_hw = nfp_net_get_hw(dev);
 	hw = &net_hw->super;
 
 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
diff --git a/drivers/net/nfp/nfp_net_common.h b/drivers/net/nfp/nfp_net_common.h
index 829e9c5333..e242251bc2 100644
--- a/drivers/net/nfp/nfp_net_common.h
+++ b/drivers/net/nfp/nfp_net_common.h
@@ -237,6 +237,7 @@ void nfp_net_cfg_read_version(struct nfp_net_hw *hw);
 int nfp_net_firmware_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size);
 int nfp_repr_firmware_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size);
 bool nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version);
+struct nfp_net_hw *nfp_net_get_hw(const struct rte_eth_dev *dev);
 
 #define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\
 	((struct nfp_app_fw_nic *)app_fw_priv)
-- 
2.39.1

