In dispatch mode, the assumption is that the graphs will be polled continuously: there is no way to know when packets are transferred from one graph to another.
Introduce a callback to be notified when packets are enqueued in the work queue: this can be useful if we want to wake up a thread. This sleep/wake-up mechanism is deferred to the application. Signed-off-by: Christophe Fontaine <cfont...@redhat.com> --- lib/graph/graph.c | 10 +++++++--- lib/graph/rte_graph.h | 14 ++++++++++++++ lib/graph/rte_graph_model_mcore_dispatch.c | 2 ++ lib/graph/rte_graph_worker_common.h | 2 ++ 4 files changed, 25 insertions(+), 3 deletions(-) diff --git a/lib/graph/graph.c b/lib/graph/graph.c index 146d0a12b4..61159edc72 100644 --- a/lib/graph/graph.c +++ b/lib/graph/graph.c @@ -600,9 +600,13 @@ graph_clone(struct graph *parent_graph, const char *name, struct rte_graph_param graph->graph->model = parent_graph->graph->model; /* Create the graph schedule work queue */ - if (rte_graph_worker_model_get(graph->graph) == RTE_GRAPH_MODEL_MCORE_DISPATCH && - graph_sched_wq_create(graph, parent_graph, prm)) - goto graph_mem_destroy; + if (rte_graph_worker_model_get(graph->graph) == RTE_GRAPH_MODEL_MCORE_DISPATCH) { + if (graph_sched_wq_create(graph, parent_graph, prm)) + goto graph_mem_destroy; + + graph->graph->dispatch.notify_cb = prm->dispatch.notify_cb; + graph->graph->dispatch.cb_priv = prm->dispatch.cb_priv; + } /* Call init() of the all the nodes in the graph */ if (graph_node_init(graph)) diff --git a/lib/graph/rte_graph.h b/lib/graph/rte_graph.h index 097d0dc9d5..16b1dbac3f 100644 --- a/lib/graph/rte_graph.h +++ b/lib/graph/rte_graph.h @@ -150,6 +150,18 @@ typedef void (*rte_node_fini_t)(const struct rte_graph *graph, typedef int (*rte_graph_cluster_stats_cb_t)(bool is_first, bool is_last, void *cookie, const struct rte_graph_cluster_node_stats *stats); + +/** + * Graph dispatch enqueue notification callback. + * + * @param graph + * Current graph + * @param priv + * Opaque argument given to the callback.
+ * + */ +typedef void (*packets_enqueued_cb)(struct rte_graph *graph, uint64_t priv); + /** * Structure to hold configuration parameters for creating the graph. * @@ -172,6 +184,8 @@ struct rte_graph_param { struct { uint32_t wq_size_max; /**< Maximum size of workqueue for dispatch model. */ uint32_t mp_capacity; /**< Capacity of memory pool for dispatch model. */ + packets_enqueued_cb notify_cb; + uint64_t cb_priv; } dispatch; }; }; diff --git a/lib/graph/rte_graph_model_mcore_dispatch.c b/lib/graph/rte_graph_model_mcore_dispatch.c index 70f0069bc1..706b5469f0 100644 --- a/lib/graph/rte_graph_model_mcore_dispatch.c +++ b/lib/graph/rte_graph_model_mcore_dispatch.c @@ -102,6 +102,8 @@ __graph_sched_node_enqueue(struct rte_node *node, struct rte_graph *graph) if (node->idx > 0) goto submit_again; + if (graph->dispatch.notify_cb) + graph->dispatch.notify_cb(graph, graph->dispatch.cb_priv); return true; fallback: diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h index aef0f65673..221deceaee 100644 --- a/lib/graph/rte_graph_worker_common.h +++ b/lib/graph/rte_graph_worker_common.h @@ -68,6 +68,8 @@ struct __rte_cache_aligned rte_graph { unsigned int lcore_id; /**< The graph running Lcore. */ struct rte_ring *wq; /**< The work-queue for pending streams. */ struct rte_mempool *mp; /**< The mempool for scheduling streams. */ + packets_enqueued_cb notify_cb; /**< callback when a packet crosses lcores */ + uint64_t cb_priv; /**< Opaque parameter for notify_cb */ } dispatch; /** Only used by dispatch model */ }; SLIST_ENTRY(rte_graph) next; /* The next for rte_graph list */ -- 2.43.5