e1000: add ndo_write_rx_mode callback and convert to the rx_mode snapshot and deferred write model
Signed-off-by: I Viswanath <[email protected]>
---
The suspend path (__e1000_shutdown) was calling the set_rx_mode ndo even
when the netif was down. Since that no longer makes sense in the new
model, the rx_mode write is now scheduled only while the netif is
running. Please correct me if this is a mistake.

 drivers/net/ethernet/intel/e1000/e1000_main.c | 59 ++++++++++++-------
 1 file changed, 38 insertions(+), 21 deletions(-)

diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 7f078ec9c14c..3b0260d502d4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -99,6 +99,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 				struct e1000_rx_ring *rx_ring);
 static void e1000_set_rx_mode(struct net_device *netdev);
+static void e1000_write_rx_mode(struct net_device *netdev);
 static void e1000_update_phy_info_task(struct work_struct *work);
 static void e1000_watchdog(struct work_struct *work);
 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
@@ -359,7 +360,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	int i;
 
-	e1000_set_rx_mode(netdev);
+	netif_schedule_rx_mode_work(netdev);
 
 	e1000_restore_vlan(adapter);
 	e1000_init_manageability(adapter);
@@ -823,6 +824,7 @@ static const struct net_device_ops e1000_netdev_ops = {
 	.ndo_stop		= e1000_close,
 	.ndo_start_xmit		= e1000_xmit_frame,
 	.ndo_set_rx_mode	= e1000_set_rx_mode,
+	.ndo_write_rx_mode	= e1000_write_rx_mode,
 	.ndo_set_mac_address	= e1000_set_mac,
 	.ndo_tx_timeout		= e1000_tx_timeout,
 	.ndo_change_mtu		= e1000_change_mtu,
@@ -1827,7 +1829,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 	/* This is useful for sniffing bad packets. */
 	if (adapter->netdev->features & NETIF_F_RXALL) {
 		/* UPE and MPE will be handled by normal PROMISC logic
-		 * in e1000e_set_rx_mode
+		 * in e1000_write_rx_mode
 		 */
 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
@@ -2222,26 +2224,39 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 	return 0;
 }
 
+static void e1000_set_rx_mode(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	bool allmulti = !!(netdev->flags & IFF_ALLMULTI);
+	bool promisc = !!(netdev->flags & IFF_PROMISC);
+	bool vlan = e1000_vlan_used(adapter);
+
+	netif_rx_mode_set_flag(netdev, NETIF_RX_MODE_UC_SKIP, promisc);
+
+	netif_rx_mode_set_cfg(netdev, NETIF_RX_MODE_CFG_ALLMULTI, allmulti);
+	netif_rx_mode_set_cfg(netdev, NETIF_RX_MODE_CFG_PROMISC, promisc);
+	netif_rx_mode_set_cfg(netdev, NETIF_RX_MODE_CFG_VLAN, vlan);
+}
+
 /**
- * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * e1000_write_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
  * @netdev: network interface device structure
  *
- * The set_rx_mode entry point is called whenever the unicast or multicast
- * address lists or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper unicast, multicast,
- * promiscuous mode, and all-multi behavior.
+ * This routine is responsible for configuring the hardware for proper unicast,
+ * multicast, promiscuous mode, and all-multi behavior.
  **/
-static void e1000_set_rx_mode(struct net_device *netdev)
+static void e1000_write_rx_mode(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	struct netdev_hw_addr *ha;
 	bool use_uc = false;
 	u32 rctl;
 	u32 hash_value;
-	int i, rar_entries = E1000_RAR_ENTRIES;
+	int i, rar_entries = E1000_RAR_ENTRIES, ni;
 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
+	char *ha_addr;
 
 	if (!mcarray)
 		return;
@@ -2250,22 +2265,22 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 
 	rctl = er32(RCTL);
 
-	if (netdev->flags & IFF_PROMISC) {
+	if (netif_rx_mode_get_cfg(netdev, NETIF_RX_MODE_CFG_PROMISC)) {
 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
 		rctl &= ~E1000_RCTL_VFE;
 	} else {
-		if (netdev->flags & IFF_ALLMULTI)
+		if (netif_rx_mode_get_cfg(netdev, NETIF_RX_MODE_CFG_ALLMULTI))
 			rctl |= E1000_RCTL_MPE;
 		else
 			rctl &= ~E1000_RCTL_MPE;
 		/* Enable VLAN filter if there is a VLAN */
-		if (e1000_vlan_used(adapter))
+		if (netif_rx_mode_get_cfg(netdev, NETIF_RX_MODE_CFG_VLAN))
 			rctl |= E1000_RCTL_VFE;
 	}
 
-	if (netdev_uc_count(netdev) > rar_entries - 1) {
+	if (netif_rx_mode_uc_count(netdev) > rar_entries - 1) {
 		rctl |= E1000_RCTL_UPE;
-	} else if (!(netdev->flags & IFF_PROMISC)) {
+	} else if (!netif_rx_mode_get_cfg(netdev, NETIF_RX_MODE_CFG_PROMISC)) {
 		rctl &= ~E1000_RCTL_UPE;
 		use_uc = true;
 	}
@@ -2286,23 +2301,23 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 	 */
 	i = 1;
 	if (use_uc)
-		netdev_for_each_uc_addr(ha, netdev) {
+		netif_rx_mode_for_each_uc_addr(ha_addr, netdev, ni) {
 			if (i == rar_entries)
 				break;
-			e1000_rar_set(hw, ha->addr, i++);
+			e1000_rar_set(hw, ha_addr, i++);
 		}
 
-	netdev_for_each_mc_addr(ha, netdev) {
+	netif_rx_mode_for_each_mc_addr(ha_addr, netdev, ni) {
 		if (i == rar_entries) {
 			/* load any remaining addresses into the hash table */
 			u32 hash_reg, hash_bit, mta;
 
-			hash_value = e1000_hash_mc_addr(hw, ha->addr);
+			hash_value = e1000_hash_mc_addr(hw, ha_addr);
 			hash_reg = (hash_value >> 5) & 0x7F;
 			hash_bit = hash_value & 0x1F;
 			mta = (1 << hash_bit);
 			mcarray[hash_reg] |= mta;
 		} else {
-			e1000_rar_set(hw, ha->addr, i++);
+			e1000_rar_set(hw, ha_addr, i++);
 		}
 	}
@@ -5094,7 +5109,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 
 	if (wufc) {
 		e1000_setup_rctl(adapter);
-		e1000_set_rx_mode(netdev);
+
+		if (netif_running(netdev))
+			netif_schedule_rx_mode_work(netdev);
 
 		rctl = er32(RCTL);
 
-- 
2.47.3
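
For reviewers new to this series, here is a minimal sketch (not part of
the patch) of the driver-side shape of the snapshot and deferred write
model. It uses the netif_rx_mode_* helpers and the ndo_write_rx_mode hook
only as they are exercised in the e1000 conversion above; the foo_*
driver and its hardware-programming steps are hypothetical placeholders.

#include <linux/netdevice.h>

/* set_rx_mode: called in atomic context, only snapshots the current state. */
static void foo_set_rx_mode(struct net_device *netdev)
{
	netif_rx_mode_set_cfg(netdev, NETIF_RX_MODE_CFG_PROMISC,
			      !!(netdev->flags & IFF_PROMISC));
	netif_rx_mode_set_cfg(netdev, NETIF_RX_MODE_CFG_ALLMULTI,
			      !!(netdev->flags & IFF_ALLMULTI));
}

/* write_rx_mode: runs deferred, consumes the snapshot and programs filters. */
static void foo_write_rx_mode(struct net_device *netdev)
{
	char *ha_addr;
	int ni;

	if (netif_rx_mode_get_cfg(netdev, NETIF_RX_MODE_CFG_PROMISC)) {
		/* enable promiscuous mode in hardware (register writes elided) */
	}

	netif_rx_mode_for_each_mc_addr(ha_addr, netdev, ni) {
		/* add ha_addr to the hardware multicast filter (elided) */
	}
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_set_rx_mode	= foo_set_rx_mode,	/* snapshot */
	.ndo_write_rx_mode	= foo_write_rx_mode,	/* deferred write */
};

Internal paths that previously invoked the rx_mode handler directly
(e.g. (re)configure and the WoL suspend path above) instead request the
deferred write with netif_schedule_rx_mode_work(netdev), and as of this
patch only while netif_running() is true.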
