On Mon, Jun 25, 2018 at 11:04 AM, Amritha Nambiar
<amritha.namb...@intel.com> wrote:
> Use static_key for XPS maps to reduce the cost of extra map checks,
> similar to how it is used for RPS and RFS. This includes static_key
> 'xps_needed' for XPS and another for 'xps_rxqs_needed' for XPS using
> Rx queues map.
>

Acked-by: Tom Herbert <t...@quantonium.net>

> Signed-off-by: Amritha Nambiar <amritha.namb...@intel.com>
> ---
>  net/core/dev.c |   26 ++++++++++++++++++++------
>  1 file changed, 20 insertions(+), 6 deletions(-)
>
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 2552556..df2a78d 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -2081,6 +2081,10 @@ int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
>  EXPORT_SYMBOL(netdev_txq_to_tc);
>
>  #ifdef CONFIG_XPS
> +struct static_key xps_needed __read_mostly;
> +EXPORT_SYMBOL(xps_needed);
> +struct static_key xps_rxqs_needed __read_mostly;
> +EXPORT_SYMBOL(xps_rxqs_needed);
>  static DEFINE_MUTEX(xps_map_mutex);
>  #define xmap_dereference(P)            \
>         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
> @@ -2170,12 +2174,14 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
>
>         mutex_lock(&xps_map_mutex);
>
> -       dev_maps = xmap_dereference(dev->xps_rxqs_map);
> -       if (dev_maps) {
> -               nr_ids = dev->num_rx_queues;
> -               clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset,
> -                              count, true);
> -
> +       if (static_key_false(&xps_rxqs_needed)) {
> +               dev_maps = xmap_dereference(dev->xps_rxqs_map);
> +               if (dev_maps) {
> +                       nr_ids = dev->num_rx_queues;
> +                       clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
> +                                      offset, count, true);
> +               }
> +               static_key_slow_dec(&xps_rxqs_needed);
>         }
>
>         dev_maps = xmap_dereference(dev->xps_cpus_map);
> @@ -2189,6 +2195,7 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
>                        false);
>
>  out_no_maps:
> +       static_key_slow_dec(&xps_needed);
>         mutex_unlock(&xps_map_mutex);
>  }
>
> @@ -2297,6 +2304,10 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
>         if (!new_dev_maps)
>                 goto out_no_new_maps;
>
> +       static_key_slow_inc(&xps_needed);
> +       if (is_rxqs_map)
> +               static_key_slow_inc(&xps_rxqs_needed);
> +
>         for (j = -1; j = attrmask_next(j, possible_mask, nr_ids),
>              j < nr_ids;) {
>                 /* copy maps belonging to foreign traffic classes */
> @@ -3450,6 +3461,9 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
>         struct xps_map *map;
>         int queue_index = -1;
>
> +       if (!static_key_false(&xps_needed))
> +               return -1;
> +
>         rcu_read_lock();
>         dev_maps = rcu_dereference(dev->xps_cpus_map);
>         if (dev_maps) {
>

Reply via email to