This moves delayed work to a temporary chain to prevent a lock inversion between the delayed work mutex and the alloc_sem lock. Delayed work is now processed only after the delayed work mutex has been released. The locking order is: any JFFS2 lock is acquired before the delayed work mutex.
---
 cpukit/libfs/src/jffs2/src/fs-rtems.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)
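Note (everything below the diffstat and above the diff is ignored by git am): the fix follows the usual "drain under the lock, call back after unlocking" pattern. Expired entries are moved onto a chain local to process_delayed_work() while delayed_work_mutex is held, and their callbacks run only after the mutex is dropped, so a callback that takes alloc_sem can no longer invert the lock order. Below is a minimal standalone sketch of that pattern, assuming POSIX threads; work_item, queue_work, process_work, and say_done are hypothetical names, not the RTEMS chain API, and unlike the patch the sketch drains every pending item rather than only the expired ones.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical work item; loosely mirrors struct delayed_work. */
struct work_item {
	struct work_item* next;
	void (*callback)(struct work_item*);
};

static pthread_mutex_t work_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct work_item* pending_head;

static void queue_work(struct work_item* item)
{
	pthread_mutex_lock(&work_mutex);
	item->next = pending_head;
	pending_head = item;
	pthread_mutex_unlock(&work_mutex);
}

static void process_work(void)
{
	struct work_item* ready;

	/* Detach the whole pending list while holding the work mutex. */
	pthread_mutex_lock(&work_mutex);
	ready = pending_head;
	pending_head = NULL;
	pthread_mutex_unlock(&work_mutex);

	/*
	 * Run the callbacks unlocked. A callback may now take other
	 * locks (such as alloc_sem) without inverting the lock order,
	 * because the work mutex is no longer held.
	 */
	while (ready != NULL) {
		struct work_item* next = ready->next;
		ready->callback(ready);
		ready = next;
	}
}

static void say_done(struct work_item* item)
{
	(void) item;
	puts("callback ran outside the work mutex");
}

int main(void)
{
	struct work_item item = { NULL, say_done };

	queue_work(&item);
	process_work();
	return 0;
}

The patch applies the same idea with rtems_chain_control, with the extra filter that only entries whose execution_time has passed are moved to the local chain.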
diff --git a/cpukit/libfs/src/jffs2/src/fs-rtems.c b/cpukit/libfs/src/jffs2/src/fs-rtems.c
index 59d03effe6..1a677c9772 100644
--- a/cpukit/libfs/src/jffs2/src/fs-rtems.c
+++ b/cpukit/libfs/src/jffs2/src/fs-rtems.c
@@ -1282,6 +1282,8 @@ static void process_delayed_work(void)
 	rtems_chain_node* node;
 
 	mutex_lock(&delayed_work_mutex);
+	rtems_chain_control process_work_chain;
+	rtems_chain_initialize_empty(&process_work_chain);
 
 	if (rtems_chain_is_empty(&delayed_work_chain)) {
 		mutex_unlock(&delayed_work_mutex);
@@ -1294,12 +1296,22 @@ static void process_delayed_work(void)
 		rtems_chain_node* next_node = rtems_chain_next(node);
 		if (rtems_clock_get_uptime_nanoseconds() >= work->execution_time) {
 			rtems_chain_extract(node);
-			work->callback(&work->work);
+			rtems_chain_append(&process_work_chain, node);
 		}
 		node = next_node;
 	}
 	mutex_unlock(&delayed_work_mutex);
+
+	node = rtems_chain_first(&process_work_chain);
+	while (!rtems_chain_is_tail(&process_work_chain, node)) {
+		work = (struct delayed_work*) node;
+		rtems_chain_node* next_node = rtems_chain_next(node);
+		rtems_chain_extract(node);
+		work->callback(&work->work);
+		node = next_node;
+	}
 }
+
 /* Task for processing delayed work */
 static rtems_task delayed_work_task(
 	rtems_task_argument unused
-- 
2.39.2