Add a new kernel config, SOCK_RX_QUEUE_MAPPING, to compile in the socket
RX queue field and its logic, instead of keying them off the XPS config.
This removes the dependency on XPS and allows non-XPS use cases to select
the field, as we do in the next patch.

In addition, use the new config to guard the logic in sk_rx_queue_get()
and protect access to the sk_rx_queue_mapping field, while keeping the
function exposed unconditionally, just like sk_rx_queue_set() and
sk_rx_queue_clear().

Signed-off-by: Tariq Toukan <tar...@nvidia.com>
Reviewed-by: Maxim Mikityanskiy <maxi...@nvidia.com>
---
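Note (not part of the commit message): the intent is that a non-XPS user
can get the RX queue field compiled in simply by selecting the new symbol
from its own Kconfig entry. A minimal sketch, using a hypothetical config
symbol FOO_RX_STEERING for illustration:

    # Hypothetical Kconfig entry of a non-XPS user; only the
    # 'select SOCK_RX_QUEUE_MAPPING' line reflects this patch.
    config FOO_RX_STEERING
            bool "Example non-XPS user of the socket RX queue mapping"
            select SOCK_RX_QUEUE_MAPPING
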
 include/net/sock.h | 12 ++++++------
 net/Kconfig        |  4 ++++
 net/core/filter.c  |  2 +-
 3 files changed, 11 insertions(+), 7 deletions(-)
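
A minimal C sketch (hypothetical helper, not part of this patch) of why
sk_rx_queue_get() stays exposed unconditionally: callers no longer need
their own #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING, since the helper simply
returns -1 when the option is off.

    /* Hypothetical caller, assuming <net/sock.h> after this patch. */
    #include <net/sock.h>

    static u16 foo_pick_tx_queue(const struct sock *sk, u16 fallback)
    {
            int rxq = sk_rx_queue_get(sk); /* -1 if unset or config is off */

            return rxq >= 0 ? (u16)rxq : fallback;
    }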

diff --git a/include/net/sock.h b/include/net/sock.h
index 690e496a0e79..855c068c6c86 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -226,7 +226,7 @@ struct sock_common {
                struct hlist_nulls_node skc_nulls_node;
        };
        unsigned short          skc_tx_queue_mapping;
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        unsigned short          skc_rx_queue_mapping;
 #endif
        union {
@@ -356,7 +356,7 @@ struct sock {
 #define sk_nulls_node          __sk_common.skc_nulls_node
 #define sk_refcnt              __sk_common.skc_refcnt
 #define sk_tx_queue_mapping    __sk_common.skc_tx_queue_mapping
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
 #define sk_rx_queue_mapping    __sk_common.skc_rx_queue_mapping
 #endif
 
@@ -1838,7 +1838,7 @@ static inline int sk_tx_queue_get(const struct sock *sk)
 
 static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
 {
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        if (skb_rx_queue_recorded(skb)) {
                u16 rx_queue = skb_get_rx_queue(skb);
 
@@ -1852,20 +1852,20 @@ static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
 
 static inline void sk_rx_queue_clear(struct sock *sk)
 {
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
 #endif
 }
 
-#ifdef CONFIG_XPS
 static inline int sk_rx_queue_get(const struct sock *sk)
 {
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
                return sk->sk_rx_queue_mapping;
+#endif
 
        return -1;
 }
-#endif
 
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 {
diff --git a/net/Kconfig b/net/Kconfig
index f4c32d982af6..8cea808ad9e8 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -256,9 +256,13 @@ config RFS_ACCEL
        select CPU_RMAP
        default y
 
+config SOCK_RX_QUEUE_MAPPING
+       bool
+
 config XPS
        bool
        depends on SMP
+       select SOCK_RX_QUEUE_MAPPING
        default y
 
 config HWBM
diff --git a/net/core/filter.c b/net/core/filter.c
index 9ab94e90d660..2ba03f6597bb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8816,7 +8816,7 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                                       target_size));
                break;
        case offsetof(struct bpf_sock, rx_queue_mapping):
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
                *insn++ = BPF_LDX_MEM(
                        BPF_FIELD_SIZEOF(struct sock, sk_rx_queue_mapping),
                        si->dst_reg, si->src_reg,
-- 
2.21.0
