On Sun, 11 Aug 2024 21:29:57 +0530
Rakesh Kudurumalla <rkuduruma...@marvell.com> wrote:

> The race between the jobstats updates and the timer metrics for
> forwarding and flushing is handled with a spinlock. The timer
> metrics are not displayed properly because the lock is frequently
> unavailable. This patch fixes the issue by introducing a delay
> before acquiring the lock in the loop. The delay improves the
> availability of the lock, ensuring that show_lcore_stats() can
> periodically update the statistics even while forwarding jobs are
> running.
> 
> Fixes: 204896f8d66c ("examples/l2fwd-jobstats: add new example")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Rakesh Kudurumalla <rkuduruma...@marvell.com>

The original code is a mess here; the whole idle-job loop is the
problem. It should use rte_timer_next_ticks() to find out when the
next timer is due to expire and wait for that instead.

And rather than fighting over a spinlock, use a ticket lock, which
makes waiters acquire the lock in order.
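
For reference, the ticket lock is a drop-in replacement for the
spinlock on both sides; only the fairness changes. A minimal,
untested sketch (the stats_lock name is mine, not from the example):

#include <rte_ticketlock.h>

/* Same lock/unlock shape as the spinlock, but each waiter takes a
 * ticket and is served in FIFO order, so the stats reader cannot be
 * starved by the forwarding lcore immediately re-taking the lock.
 * Initialize once with rte_ticketlock_init(&stats_lock).
 */
static rte_ticketlock_t stats_lock;

static void
read_stats_locked(void)
{
        rte_ticketlock_lock(&stats_lock);
        /* ... collect and reset the jobstats here ... */
        rte_ticketlock_unlock(&stats_lock);
}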

Something like the following (untested):

diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 308b8edd20..9586d90ab6 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -27,7 +27,7 @@
 #include <rte_ethdev.h>
 #include <rte_mempool.h>
 #include <rte_mbuf.h>
-#include <rte_spinlock.h>
+#include <rte_ticketlock.h>
 
 #include <rte_errno.h>
 #include <rte_jobstats.h>
@@ -80,8 +80,7 @@ struct __rte_cache_aligned lcore_queue_conf {
        struct rte_jobstats idle_job;
        struct rte_jobstats_context jobs_context;
 
-       RTE_ATOMIC(uint16_t) stats_read_pending;
-       rte_spinlock_t lock;
+       rte_ticketlock_t lock;
 };
 /* >8 End of list of queues to be polled for given lcore. */
 struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
@@ -151,9 +150,7 @@ show_lcore_stats(unsigned lcore_id)
        uint64_t collection_time = rte_get_timer_cycles();
 
        /* Ask forwarding thread to give us stats. */
-       rte_atomic_store_explicit(&qconf->stats_read_pending, 1, rte_memory_order_relaxed);
-       rte_spinlock_lock(&qconf->lock);
-       rte_atomic_store_explicit(&qconf->stats_read_pending, 0, rte_memory_order_relaxed);
+       rte_ticketlock_lock(&qconf->lock);
 
        /* Collect context statistics. */
        stats_period = ctx->state_time - ctx->start_time;
@@ -195,7 +192,7 @@ show_lcore_stats(unsigned lcore_id)
        idle_exec_max = qconf->idle_job.max_exec_time;
        rte_jobstats_reset(&qconf->idle_job);
 
-       rte_spinlock_unlock(&qconf->lock);
+       rte_ticketlock_unlock(&qconf->lock);
 
        exec -= idle_exec;
        busy = exec + management;
@@ -478,11 +475,11 @@ l2fwd_main_loop(void)
        unsigned lcore_id;
        unsigned i, portid;
        struct lcore_queue_conf *qconf;
-       uint8_t stats_read_pending = 0;
-       uint8_t need_manage;
+       uint64_t hz;
 
        lcore_id = rte_lcore_id();
        qconf = &lcore_queue_conf[lcore_id];
+       hz = rte_get_timer_hz();
 
        if (qconf->n_rx_port == 0) {
                RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
@@ -502,47 +499,22 @@ l2fwd_main_loop(void)
 
        /* Minimize impact of stats reading. 8< */
        for (;;) {
-               rte_spinlock_lock(&qconf->lock);
-
-               do {
-                       rte_jobstats_context_start(&qconf->jobs_context);
-
-                       /* Do the Idle job:
-                        * - Read stats_read_pending flag
-                        * - check if some real job need to be executed
-                        */
-                       rte_jobstats_start(&qconf->jobs_context, &qconf->idle_job);
-
-                       uint64_t repeats = 0;
-
-                       do {
-                               uint8_t i;
-                               uint64_t now = rte_get_timer_cycles();
-
-                               repeats++;
-                               need_manage = qconf->flush_timer.expire < now;
-                               /* Check if we was esked to give a stats. */
-                               stats_read_pending = rte_atomic_load_explicit(
-                                       &qconf->stats_read_pending,
-                                       rte_memory_order_relaxed);
-                               need_manage |= stats_read_pending;
 
-                               for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
-                                       need_manage = qconf->rx_timers[i].expire < now;
+               rte_ticketlock_lock(&qconf->lock);
 
-                       } while (!need_manage);
+               rte_jobstats_context_start(&qconf->jobs_context);
+               rte_jobstats_start(&qconf->jobs_context, &qconf->idle_job);
+               rte_timer_manage();
+               rte_jobstats_context_finish(&qconf->jobs_context);
 
-                       if (likely(repeats != 1))
-                               rte_jobstats_finish(&qconf->idle_job, qconf->idle_job.target);
-                       else
-                               rte_jobstats_abort(&qconf->idle_job);
+               int64_t next_ticks = rte_timer_next_ticks();
 
-                       rte_timer_manage();
-                       rte_jobstats_context_finish(&qconf->jobs_context);
-               } while (likely(stats_read_pending == 0));
+               rte_ticketlock_unlock(&qconf->lock);
 
-               rte_spinlock_unlock(&qconf->lock);
-               rte_pause();
+               if (next_ticks > 0)
+                       rte_delay_us((1000000 * next_ticks) / hz);
+               else
+                       rte_pause();
        }
        /* >8 End of minimize impact of stats reading. */
 }
@@ -972,7 +944,7 @@ main(int argc, char **argv)
        RTE_LCORE_FOREACH(lcore_id) {
                qconf = &lcore_queue_conf[lcore_id];
 
-               rte_spinlock_init(&qconf->lock);
+               rte_ticketlock_init(&qconf->lock);
 
                if (rte_jobstats_context_init(&qconf->jobs_context) != 0)
                        rte_panic("Jobs stats context for core %u init 
failed\n", lcore_id);
