The teardown race in cpumap is really hard to reproduce.  These changes
make it easier to reproduce, for QA purposes.

The --stress-mode now includes a case with a very small queue size of 8,
which helps the teardown flush encounter a full queue.  This results in
calling the xdp_return_frame API from a non-NAPI protected context.
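
To illustrate (not part of this change): the teardown drain looks
roughly like the sketch below.  The function name here is made up for
the example; the point is that ptr_ring_consume() plus
xdp_return_frame() run from workqueue context, outside NAPI protection.

	#include <linux/ptr_ring.h>	/* ptr_ring_consume() */
	#include <net/xdp.h>		/* xdp_return_frame() */

	/* Sketch: on cpumap entry teardown, frames still queued in the
	 * per-CPU ptr_ring must be returned.  With a tiny qsize (8) the
	 * ring is likely full at teardown, so this path actually runs.
	 */
	static void sketch_cpu_map_ring_drain(struct ptr_ring *ring)
	{
		struct xdp_frame *xdpf;

		while ((xdpf = ptr_ring_consume(ring)))
			xdp_return_frame(xdpf);
	}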

Also increase MAX_CPUS, as my QA department has larger machines than I do.

Tested-by: Jean-Tsung Hsiao <jhs...@redhat.com>
Signed-off-by: Jesper Dangaard Brouer <bro...@redhat.com>
---
 samples/bpf/xdp_redirect_cpu_kern.c |    2 +-
 samples/bpf/xdp_redirect_cpu_user.c |    4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
index 8cb703671b04..0cc3d71057f0 100644
--- a/samples/bpf/xdp_redirect_cpu_kern.c
+++ b/samples/bpf/xdp_redirect_cpu_kern.c
@@ -14,7 +14,7 @@
 #include <uapi/linux/bpf.h>
 #include "bpf_helpers.h"
 
-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */
 
 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct bpf_map_def SEC("maps") cpu_map = {
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index f6efaefd485b..4b4d78fffe30 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include <arpa/inet.h>
 #include <linux/if_link.h>
 
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
 
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
         * procedure.
         */
        create_cpu_entry(1,  1024, 0, false);
-       create_cpu_entry(1,   128, 0, false);
+       create_cpu_entry(1,     8, 0, false);
        create_cpu_entry(1, 16000, 0, false);
 }
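
A note on testing: an example invocation that exercises the new
small-queue case could look like the line below (the device name is a
placeholder; see the sample's --help output for the remaining options):

  $ sudo ./xdp_redirect_cpu --dev eth0 --stress-mode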
 
