Currently, rte_flow-related checks use `rte_eth_dev` to check for max VFs. With the coming rework of flows, the aim is to make the code as multiprocess-agnostic as possible, and the `rte_eth_dev` pointer is process-local.
To support this in VF-related checks, cache max_vfs in ixgbe adapter during device init and read it in the rte_flow check paths to avoid direct dependency on `rte_eth_dev`. Signed-off-by: Anatoly Burakov <[email protected]> --- drivers/net/intel/ixgbe/ixgbe_ethdev.c | 3 ++- drivers/net/intel/ixgbe/ixgbe_ethdev.h | 4 ++++ drivers/net/intel/ixgbe/ixgbe_flow.c | 8 ++++---- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/net/intel/ixgbe/ixgbe_ethdev.c b/drivers/net/intel/ixgbe/ixgbe_ethdev.c index 6f758b802d..4cfaf47a38 100644 --- a/drivers/net/intel/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/intel/ixgbe/ixgbe_ethdev.c @@ -1084,7 +1084,7 @@ ixgbe_parse_devargs(struct ixgbe_adapter *adapter, static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) { - struct ixgbe_adapter *ad = eth_dev->data->dev_private; + struct ixgbe_adapter *ad = IXGBE_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct ixgbe_hw *hw = @@ -1151,6 +1151,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) hw->vendor_id = pci_dev->id.vendor_id; hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; hw->allow_unsupported_sfp = 1; + ad->max_vfs = pci_dev->max_vfs; /* Initialize the shared code (base driver) */ #ifdef RTE_LIBRTE_IXGBE_BYPASS diff --git a/drivers/net/intel/ixgbe/ixgbe_ethdev.h b/drivers/net/intel/ixgbe/ixgbe_ethdev.h index a014dbff10..04e5e014d9 100644 --- a/drivers/net/intel/ixgbe/ixgbe_ethdev.h +++ b/drivers/net/intel/ixgbe/ixgbe_ethdev.h @@ -491,6 +491,7 @@ struct ixgbe_adapter { /* Used for limiting SDP3 TX_DISABLE checks */ uint8_t sdp3_no_tx_disable; + uint16_t max_vfs; /* Used for VF link sync with PF's physical and logical (by checking * mailbox status) link status. 
@@ -515,6 +516,9 @@ uint16_t ixgbe_vf_representor_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts #define IXGBE_DEV_FDIR_CONF(dev) \ (&((struct ixgbe_adapter *)(dev)->data->dev_private)->fdir_conf) +#define IXGBE_DEV_PRIVATE_TO_ADAPTER(adapter) \ + ((struct ixgbe_adapter *)adapter) + #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\ (&((struct ixgbe_adapter *)adapter)->hw) diff --git a/drivers/net/intel/ixgbe/ixgbe_flow.c b/drivers/net/intel/ixgbe/ixgbe_flow.c index 01cd4f9bde..c40ef02f23 100644 --- a/drivers/net/intel/ixgbe/ixgbe_flow.c +++ b/drivers/net/intel/ixgbe/ixgbe_flow.c @@ -1272,7 +1272,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, const struct rte_flow_item_e_tag *e_tag_mask; const struct rte_flow_action *act; const struct rte_flow_action_vf *act_vf; - struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct ixgbe_adapter *ad = IXGBE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); if (!pattern) { rte_flow_error_set(error, EINVAL, @@ -1404,7 +1404,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, act_vf = (const struct rte_flow_action_vf *)act->conf; filter->pool = act_vf->id; } else { - filter->pool = pci_dev->max_vfs; + filter->pool = ad->max_vfs; } /* check if the next not void item is END */ @@ -1430,7 +1430,7 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, { int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct ixgbe_adapter *ad = IXGBE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); uint16_t vf_num; ret = cons_parse_l2_tn_filter(dev, attr, pattern, @@ -1447,7 +1447,7 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev, return -rte_errno; } - vf_num = pci_dev->max_vfs; + vf_num = ad->max_vfs; if (l2_tn_filter->pool > vf_num) return -rte_errno; -- 2.47.3

