The mutex objects use the owner field of the thread queues for the mutex owner. Use this and add deadlock detection to _Thread_queue_Enqueue_critical() for thread queues with an owner.
Update #2412. Update #2556. --- cpukit/sapi/src/interrtext.c | 5 +- cpukit/score/include/rtems/score/interr.h | 3 +- cpukit/score/include/rtems/score/thread.h | 24 ++ cpukit/score/include/rtems/score/threadq.h | 35 +- cpukit/score/include/rtems/score/threadqimpl.h | 33 ++ cpukit/score/src/coremutexseize.c | 9 + cpukit/score/src/mutex.c | 4 + cpukit/score/src/threadqenqueue.c | 108 ++++++ testsuites/sptests/spinternalerror02/init.c | 4 +- .../spinternalerror02/spinternalerror02.scn | 3 +- testsuites/sptests/spmutex01/init.c | 413 +++++++++++++++++++-- testsuites/sptests/spmutex01/spmutex01.doc | 5 + testsuites/sptests/spsyslock01/init.c | 79 ++-- 13 files changed, 638 insertions(+), 87 deletions(-) diff --git a/cpukit/sapi/src/interrtext.c b/cpukit/sapi/src/interrtext.c index 3ae7315..8408061 100644 --- a/cpukit/sapi/src/interrtext.c +++ b/cpukit/sapi/src/interrtext.c @@ -7,7 +7,7 @@ */ /* - * Copyright (c) 2012-2015 embedded brains GmbH. All rights reserved. + * Copyright (c) 2012, 2016 embedded brains GmbH. All rights reserved. * * embedded brains GmbH * Dornierstr. 
4 @@ -54,7 +54,8 @@ static const char *const internal_error_text[] = { "INTERNAL_ERROR_CPU_ISR_INSTALL_VECTOR", "INTERNAL_ERROR_RESOURCE_IN_USE", "INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL", - "INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL" + "INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL", + "INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK" }; const char *rtems_internal_error_text( rtems_fatal_code error ) diff --git a/cpukit/score/include/rtems/score/interr.h b/cpukit/score/include/rtems/score/interr.h index ca48db2..cc3dca4 100644 --- a/cpukit/score/include/rtems/score/interr.h +++ b/cpukit/score/include/rtems/score/interr.h @@ -163,7 +163,8 @@ typedef enum { INTERNAL_ERROR_CPU_ISR_INSTALL_VECTOR, INTERNAL_ERROR_RESOURCE_IN_USE, INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL, - INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL + INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL, + INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK } Internal_errors_Core_list; typedef CPU_Uint32ptr Internal_errors_t; diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h index 46c222f..54ecd7c 100644 --- a/cpukit/score/include/rtems/score/thread.h +++ b/cpukit/score/include/rtems/score/thread.h @@ -264,6 +264,30 @@ typedef struct { RBTree_Node RBTree; } Node; +#if defined(RTEMS_SMP) + /** + * @brief Control block used to recursively acquire thread queue owners. + * + * @see _Thread_queue_Owner_acquire_recursive(). + */ + struct { + /** + * @brief Chain node to register in Thread_queue_Context::Recursive_owners. + */ + Chain_Node Node; + + /** + * @brief ISR lock context used for _Thread_Lock_acquire_critical(). + */ + ISR_lock_Context Context; + + /** + * @brief Lock returned by _Thread_Lock_acquire_critical(). 
+ */ + void *lock; + } Recursive_owner; +#endif + #if defined(RTEMS_MULTIPROCESSING) /* * @brief This field is the identifier of the remote object this thread is diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h index a4e5292..1a365fa 100644 --- a/cpukit/score/include/rtems/score/threadq.h +++ b/cpukit/score/include/rtems/score/threadq.h @@ -42,6 +42,17 @@ extern "C" { typedef struct _Thread_Control Thread_Control; +/** + * @brief Thread queue deadlock callout. + * + * @param the_thread The thread that detected the deadlock. + * + * @see _Thread_queue_Context_set_deadlock_callout(). + */ +typedef void ( *Thread_queue_Deadlock_callout )( + Thread_Control *the_thread +); + #if defined(RTEMS_MULTIPROCESSING) /** * @brief Multiprocessing (MP) support callout for thread queue operations. @@ -50,6 +61,8 @@ typedef struct _Thread_Control Thread_Control; * control is actually a thread proxy if and only if * _Objects_Is_local_id( the_proxy->Object.id ) is false. * @param mp_id Object identifier of the object containing the thread queue. + * + * @see _Thread_queue_Context_set_MP_callout(). */ typedef void ( *Thread_queue_MP_callout )( Thread_Control *the_proxy, @@ -79,6 +92,17 @@ typedef struct { uint32_t expected_thread_dispatch_disable_level; /** + * @brief Invoked in case of a detected deadlock. + * + * Must be initialized for _Thread_queue_Enqueue_critical() in case the + * thread queue may have an owner, e.g. for mutex objects. + * + * @see _Thread_queue_Context_set_deadlock_callout(). + */ + Thread_queue_Deadlock_callout deadlock_callout; + +#if defined(RTEMS_MULTIPROCESSING) + /** * @brief Callout to unblock the thread in case it is actually a thread * proxy. * @@ -88,9 +112,18 @@ typedef struct { * * @see _Thread_queue_Context_set_MP_callout(). */ -#if defined(RTEMS_MULTIPROCESSING) Thread_queue_MP_callout mp_callout; #endif + +#if defined(RTEMS_SMP) + /** + * @brief Chain with recursive owners. 
+ * + * Internally used on SMP configurations by _Thread_queue_Enqueue_critical(). + * There is no need to initialize this field. + */ + Chain_Control Recursive_owners; +#endif } Thread_queue_Context; /** diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h index 73d4de2..4e8c6e1 100644 --- a/cpukit/score/include/rtems/score/threadqimpl.h +++ b/cpukit/score/include/rtems/score/threadqimpl.h @@ -54,6 +54,16 @@ typedef struct { } Thread_queue_Syslock_queue; /** + * @brief Sets the thread wait return code to STATUS_DEADLOCK. + */ +void _Thread_queue_Deadlock_status( Thread_Control *the_thread ); + +/** + * @brief Results in an INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK fatal error. + */ +void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread ); + +/** * @brief Initializes a thread queue context. * * @param queue_context The thread queue context to initialize. @@ -64,6 +74,7 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize( { #if defined(RTEMS_DEBUG) queue_context->expected_thread_dispatch_disable_level = 0xdeadbeef; + queue_context->deadlock_callout = _Thread_queue_Deadlock_fatal; #if defined(RTEMS_MULTIPROCESSING) queue_context->mp_callout = NULL; #endif @@ -91,6 +102,28 @@ _Thread_queue_Context_set_expected_level( } /** + * @brief Sets the deadlock callout in the thread queue + * context. + * + * A deadlock callout must be provided for _Thread_queue_Enqueue_critical() + * operations that operate on thread queues which may have an owner, e.g. mutex + * objects. Available deadlock callouts are _Thread_queue_Deadlock_status() + * and _Thread_queue_Deadlock_fatal(). + * + * @param queue_context The thread queue context. + * @param deadlock_callout The deadlock callout. + * + * @see _Thread_queue_Enqueue_critical(). 
+ */ +RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_deadlock_callout( + Thread_queue_Context *queue_context, + Thread_queue_Deadlock_callout deadlock_callout +) +{ + queue_context->deadlock_callout = deadlock_callout; +} + +/** * @brief Sets the MP callout in the thread queue context. * * @param queue_context The thread queue context. diff --git a/cpukit/score/src/coremutexseize.c b/cpukit/score/src/coremutexseize.c index ab743c4..d962961 100644 --- a/cpukit/score/src/coremutexseize.c +++ b/cpukit/score/src/coremutexseize.c @@ -64,6 +64,11 @@ Status_Control _CORE_mutex_Seize_slow( _Thread_queue_Context_set_expected_level( queue_context, 2 ); #endif + _Thread_queue_Context_set_deadlock_callout( + queue_context, + _Thread_queue_Deadlock_status + ); + _Thread_queue_Enqueue_critical( &the_mutex->Wait_queue.Queue, CORE_MUTEX_TQ_OPERATIONS, @@ -91,6 +96,10 @@ Status_Control _CORE_mutex_Seize_no_protocol_slow( { if ( wait ) { _Thread_queue_Context_set_expected_level( queue_context, 1 ); + _Thread_queue_Context_set_deadlock_callout( + queue_context, + _Thread_queue_Deadlock_status + ); _Thread_queue_Enqueue_critical( &the_mutex->Wait_queue.Queue, operations, diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c index ed374a0..093d805 100644 --- a/cpukit/score/src/mutex.c +++ b/cpukit/score/src/mutex.c @@ -110,6 +110,10 @@ static void _Mutex_Acquire_slow( { _Thread_Inherit_priority( owner, executing ); _Thread_queue_Context_set_expected_level( queue_context, 1 ); + _Thread_queue_Context_set_deadlock_callout( + queue_context, + _Thread_queue_Deadlock_fatal + ); _Thread_queue_Enqueue_critical( &mutex->Queue.Queue, MUTEX_TQ_OPERATIONS, diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c index 3be7d58..dbd8504 100644 --- a/cpukit/score/src/threadqenqueue.c +++ b/cpukit/score/src/threadqenqueue.c @@ -34,6 +34,104 @@ #define THREAD_QUEUE_READY_AGAIN \ (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN) +#define 
THREAD_OF_RECURSIVE_OWNER_NODE( node ) \ + RTEMS_CONTAINER_OF( node, Thread_Control, Wait.Recursive_owner.Node ) + +static void _Thread_queue_Owner_release_recursive( + Thread_queue_Context *queue_context +) +{ +#if defined(RTEMS_SMP) + Chain_Node *head; + Chain_Node *node; + + head = _Chain_Head( &queue_context->Recursive_owners ); + node = _Chain_Last( &queue_context->Recursive_owners ); + + while ( head != node ) { + Thread_Control *node_thread; + + node_thread = THREAD_OF_RECURSIVE_OWNER_NODE( node ); + + _Thread_Lock_release_critical( + node_thread->Wait.Recursive_owner.lock, + &node_thread->Wait.Recursive_owner.Context + ); + + node = _Chain_Previous( node ); + } +#else + (void) queue_context; +#endif +} + +static bool _Thread_queue_Owner_acquire_recursive( + Thread_queue_Queue *queue, + Thread_Control *the_thread, + Thread_queue_Context *queue_context +) +{ +#if defined(RTEMS_SMP) + Thread_Control *previous; + + _Chain_Initialize_empty( &queue_context->Recursive_owners ); + previous = the_thread; +#else + (void) queue_context; +#endif + + while ( true ) { + Thread_Control *owner; + + owner = queue->owner; + + if ( owner == NULL ) { + break; + } + + if ( owner == the_thread ) { + _Thread_queue_Owner_release_recursive( queue_context ); + return false; + } + +#if defined(RTEMS_SMP) + previous->Wait.Recursive_owner.lock = _Thread_Lock_acquire_critical( + owner, + &previous->Wait.Recursive_owner.Context + ); + + _Chain_Append_unprotected( + &queue_context->Recursive_owners, + &previous->Wait.Recursive_owner.Node + ); + + previous = owner; +#endif + + queue = owner->Wait.queue; + + if ( queue == NULL ) { + break; + } + } + + return true; +} + +void _Thread_queue_Deadlock_status( Thread_Control *the_thread ) +{ + the_thread->Wait.return_code = STATUS_DEADLOCK; +} + +void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread ) +{ + _Terminate( + INTERNAL_ERROR_CORE, + false, + INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK + ); +} + void _Thread_queue_Enqueue_critical( 
Thread_queue_Queue *queue, const Thread_queue_Operations *operations, @@ -52,6 +150,14 @@ void _Thread_queue_Enqueue_critical( } #endif + if ( + !_Thread_queue_Owner_acquire_recursive( queue, the_thread, queue_context ) + ) { + _Thread_queue_Queue_release( queue, &queue_context->Lock_context ); + ( *queue_context->deadlock_callout )( the_thread ); + return; + } + _Thread_Lock_set( the_thread, &queue->Lock ); the_thread->Wait.return_code = STATUS_SUCCESSFUL; @@ -60,6 +166,8 @@ void _Thread_queue_Enqueue_critical( ( *operations->enqueue )( queue, the_thread ); + _Thread_queue_Owner_release_recursive( queue_context ); + _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK ); cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context ); _Thread_queue_Queue_release( queue, &queue_context->Lock_context ); diff --git a/testsuites/sptests/spinternalerror02/init.c b/testsuites/sptests/spinternalerror02/init.c index cbc81a5..eac90a0 100644 --- a/testsuites/sptests/spinternalerror02/init.c +++ b/testsuites/sptests/spinternalerror02/init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 embedded brains GmbH. All rights reserved. + * Copyright (c) 2012, 2016 embedded brains GmbH. All rights reserved. * * embedded brains GmbH * Donierstr. 
4 @@ -36,7 +36,7 @@ static void test_internal_error_text(void) } while ( text != text_last ); rtems_test_assert( - error - 3 == INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL + error - 3 == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK ); } diff --git a/testsuites/sptests/spinternalerror02/spinternalerror02.scn b/testsuites/sptests/spinternalerror02/spinternalerror02.scn index c6e85b1..ff04560 100644 --- a/testsuites/sptests/spinternalerror02/spinternalerror02.scn +++ b/testsuites/sptests/spinternalerror02/spinternalerror02.scn @@ -17,7 +17,7 @@ INTERNAL_ERROR_BAD_STACK_HOOK INTERNAL_ERROR_BAD_ATTRIBUTES INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL -INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE +INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0 OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP INTERNAL_ERROR_GXX_KEY_ADD_FAILED @@ -27,6 +27,7 @@ INTERNAL_ERROR_CPU_ISR_INSTALL_VECTOR INTERNAL_ERROR_RESOURCE_IN_USE INTERNAL_ERROR_RTEMS_INIT_TASK_ENTRY_IS_NULL INTERNAL_ERROR_POSIX_INIT_THREAD_ENTRY_IS_NULL +INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK ? ? 
INTERNAL_ERROR_CORE diff --git a/testsuites/sptests/spmutex01/init.c b/testsuites/sptests/spmutex01/init.c index 76d62c0..c363370 100644 --- a/testsuites/sptests/spmutex01/init.c +++ b/testsuites/sptests/spmutex01/init.c @@ -16,33 +16,65 @@ #include "config.h" #endif +#include <threads.h> +#include <setjmp.h> + +#include <rtems.h> +#include <rtems/libcsupport.h> + +#ifdef RTEMS_POSIX_API +#include <errno.h> +#include <pthread.h> +#endif + #include "tmacros.h" const char rtems_test_name[] = "SPMUTEX 1"; #define TASK_COUNT 5 +#define MTX_COUNT 3 + typedef enum { REQ_WAKE_UP_MASTER = RTEMS_EVENT_0, REQ_WAKE_UP_HELPER = RTEMS_EVENT_1, - REQ_MTX_OBTAIN = RTEMS_EVENT_2, - REQ_MTX_RELEASE = RTEMS_EVENT_3 + REQ_MTX_0_OBTAIN = RTEMS_EVENT_2, + REQ_MTX_0_RELEASE = RTEMS_EVENT_3, + REQ_MTX_1_OBTAIN = RTEMS_EVENT_4, + REQ_MTX_1_RELEASE = RTEMS_EVENT_5, + REQ_MTX_2_OBTAIN = RTEMS_EVENT_6, + REQ_MTX_2_RELEASE = RTEMS_EVENT_7, + REQ_MTX_C11_OBTAIN = RTEMS_EVENT_8, + REQ_MTX_C11_RELEASE = RTEMS_EVENT_9, + REQ_MTX_POSIX_OBTAIN = RTEMS_EVENT_10, + REQ_MTX_POSIX_RELEASE = RTEMS_EVENT_11 } request_id; typedef enum { + M, A_1, A_2_0, A_2_1, - M, H, NONE } task_id; +typedef enum { + MTX_0, + MTX_1, + MTX_2 +} mutex_id; + typedef struct { - rtems_id mtx; + rtems_id mtx[MTX_COUNT]; + mtx_t mtx_c11; +#ifdef RTEMS_POSIX_API + pthread_mutex_t mtx_posix; +#endif rtems_id tasks[TASK_COUNT]; int generation[TASK_COUNT]; int expected_generation[TASK_COUNT]; + jmp_buf deadlock_return_context; } test_context; static test_context test_instance; @@ -109,22 +141,79 @@ static void request(test_context *ctx, task_id id, request_id req) sync_with_helper(ctx); } -static void obtain(test_context *ctx) +static void obtain(test_context *ctx, mutex_id id) { rtems_status_code sc; - sc = rtems_semaphore_obtain(ctx->mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT); + sc = rtems_semaphore_obtain(ctx->mtx[id], RTEMS_WAIT, RTEMS_NO_TIMEOUT); rtems_test_assert(sc == RTEMS_SUCCESSFUL); } -static void release(test_context *ctx) 
+static void deadlock_obtain(test_context *ctx, mutex_id id) { rtems_status_code sc; - sc = rtems_semaphore_release(ctx->mtx); + sc = rtems_semaphore_obtain(ctx->mtx[id], RTEMS_WAIT, RTEMS_NO_TIMEOUT); + rtems_test_assert(sc == RTEMS_INCORRECT_STATE); +} + +static void release(test_context *ctx, mutex_id id) +{ + rtems_status_code sc; + + sc = rtems_semaphore_release(ctx->mtx[id]); rtems_test_assert(sc == RTEMS_SUCCESSFUL); } +static void obtain_c11(test_context *ctx) +{ + int status; + + status = mtx_lock(&ctx->mtx_c11); + rtems_test_assert(status == thrd_success); +} + +static void deadlock_obtain_c11(test_context *ctx) +{ + if (setjmp(ctx->deadlock_return_context) == 0) { + (void) mtx_lock(&ctx->mtx_c11); + } +} + +static void release_c11(test_context *ctx) +{ + int status; + + status = mtx_unlock(&ctx->mtx_c11); + rtems_test_assert(status == thrd_success); +} + +#ifdef RTEMS_POSIX_API +static void obtain_posix(test_context *ctx) +{ + int error; + + error = pthread_mutex_lock(&ctx->mtx_posix); + rtems_test_assert(error == 0); +} + +static void deadlock_obtain_posix(test_context *ctx) +{ + int error; + + error = pthread_mutex_lock(&ctx->mtx_posix); + rtems_test_assert(error == EDEADLK); +} + +static void release_posix(test_context *ctx) +{ + int error; + + error = pthread_mutex_unlock(&ctx->mtx_posix); + rtems_test_assert(error == 0); +} +#endif + static void check_generations(test_context *ctx, task_id a, task_id b) { size_t i; @@ -179,22 +268,65 @@ static void worker(rtems_task_argument arg) while (true) { rtems_event_set events = wait_for_events(); - if ((events & REQ_MTX_OBTAIN) != 0) { - obtain(ctx); + if ((events & REQ_MTX_0_OBTAIN) != 0) { + obtain(ctx, MTX_0); ++ctx->generation[id]; } - if ((events & REQ_MTX_RELEASE) != 0) { - release(ctx); + if ((events & REQ_MTX_0_RELEASE) != 0) { + release(ctx, MTX_0); ++ctx->generation[id]; } + + if ((events & REQ_MTX_1_OBTAIN) != 0) { + obtain(ctx, MTX_1); + ++ctx->generation[id]; + } + + if ((events & 
REQ_MTX_1_RELEASE) != 0) { + release(ctx, MTX_1); + ++ctx->generation[id]; + } + + if ((events & REQ_MTX_2_OBTAIN) != 0) { + obtain(ctx, MTX_2); + ++ctx->generation[id]; + } + + if ((events & REQ_MTX_2_RELEASE) != 0) { + release(ctx, MTX_2); + ++ctx->generation[id]; + } + + if ((events & REQ_MTX_C11_OBTAIN) != 0) { + obtain_c11(ctx); + ++ctx->generation[id]; + } + + if ((events & REQ_MTX_C11_RELEASE) != 0) { + release_c11(ctx); + ++ctx->generation[id]; + } + +#ifdef RTEMS_POSIX_API + if ((events & REQ_MTX_POSIX_OBTAIN) != 0) { + obtain_posix(ctx); + ++ctx->generation[id]; + } + + if ((events & REQ_MTX_POSIX_RELEASE) != 0) { + release_posix(ctx); + ++ctx->generation[id]; + } +#endif } } -static void test(void) +static void set_up(test_context *ctx) { - test_context *ctx = &test_instance; rtems_status_code sc; + int status; + size_t i; ctx->tasks[M] = rtems_task_self(); start_task(ctx, A_1, worker, 1); @@ -202,61 +334,264 @@ static void test(void) start_task(ctx, A_2_1, worker, 2); start_task(ctx, H, helper, 3); - sc = rtems_semaphore_create( - rtems_build_name(' ', 'M', 'T', 'X'), - 1, - RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY, - 0, - &ctx->mtx - ); - rtems_test_assert(sc == RTEMS_SUCCESSFUL); + for (i = 0; i < MTX_COUNT; ++i) { + sc = rtems_semaphore_create( + rtems_build_name(' ', 'M', 'T', 'X'), + 1, + RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY, + 0, + &ctx->mtx[i] + ); + rtems_test_assert(sc == RTEMS_SUCCESSFUL); + } - obtain(ctx); - request(ctx, A_1, REQ_MTX_OBTAIN); + status = mtx_init(&ctx->mtx_c11, mtx_plain); + rtems_test_assert(status == thrd_success); + +#ifdef RTEMS_POSIX_API + { + int error; + pthread_mutexattr_t attr; + + error = pthread_mutexattr_init(&attr); + rtems_test_assert(error == 0); + + error = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT); + rtems_test_assert(error == 0); + + error = pthread_mutex_init(&ctx->mtx_posix, &attr); + rtems_test_assert(error == 0); + + error = 
pthread_mutexattr_destroy(&attr); + rtems_test_assert(error == 0); + } +#endif +} + +static void test_inherit(test_context *ctx) +{ + obtain(ctx, MTX_0); + request(ctx, A_1, REQ_MTX_0_OBTAIN); check_generations(ctx, NONE, NONE); assert_prio(ctx, M, 1); - release(ctx); + release(ctx, MTX_0); check_generations(ctx, A_1, NONE); assert_prio(ctx, M, 3); - request(ctx, A_1, REQ_MTX_RELEASE); + request(ctx, A_1, REQ_MTX_0_RELEASE); check_generations(ctx, A_1, NONE); +} - obtain(ctx); - request(ctx, A_2_0, REQ_MTX_OBTAIN); - request(ctx, A_1, REQ_MTX_OBTAIN); - request(ctx, A_2_1, REQ_MTX_OBTAIN); +static void test_inherit_fifo_for_equal_priority(test_context *ctx) +{ + obtain(ctx, MTX_0); + request(ctx, A_2_0, REQ_MTX_0_OBTAIN); + request(ctx, A_1, REQ_MTX_0_OBTAIN); + request(ctx, A_2_1, REQ_MTX_0_OBTAIN); check_generations(ctx, NONE, NONE); assert_prio(ctx, M, 1); - release(ctx); + release(ctx, MTX_0); check_generations(ctx, A_1, NONE); assert_prio(ctx, M, 3); assert_prio(ctx, A_1, 1); - request(ctx, A_1, REQ_MTX_RELEASE); + request(ctx, A_1, REQ_MTX_0_RELEASE); check_generations(ctx, A_1, A_2_0); - request(ctx, A_2_0, REQ_MTX_RELEASE); + request(ctx, A_2_0, REQ_MTX_0_RELEASE); check_generations(ctx, A_2_0, A_2_1); - request(ctx, A_2_1, REQ_MTX_RELEASE); + request(ctx, A_2_1, REQ_MTX_0_RELEASE); check_generations(ctx, A_2_1, NONE); } -static void Init(rtems_task_argument arg) +static void test_deadlock_two_classic(test_context *ctx) { - TEST_BEGIN(); + obtain(ctx, MTX_0); + request(ctx, A_1, REQ_MTX_1_OBTAIN); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_0_OBTAIN); + check_generations(ctx, NONE, NONE); + deadlock_obtain(ctx, MTX_1); + release(ctx, MTX_0); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_0_RELEASE); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_1_RELEASE); + check_generations(ctx, A_1, NONE); +} + +static void test_deadlock_three_classic(test_context *ctx) +{ + obtain(ctx, MTX_0); + request(ctx, A_1, 
REQ_MTX_1_OBTAIN); + check_generations(ctx, A_1, NONE); + request(ctx, A_2_0, REQ_MTX_2_OBTAIN); + check_generations(ctx, A_2_0, NONE); + request(ctx, A_2_0, REQ_MTX_1_OBTAIN); + check_generations(ctx, NONE, NONE); + request(ctx, A_1, REQ_MTX_0_OBTAIN); + check_generations(ctx, NONE, NONE); + deadlock_obtain(ctx, MTX_2); + release(ctx, MTX_0); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_0_RELEASE); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_1_RELEASE); + check_generations(ctx, A_1, A_2_0); + request(ctx, A_2_0, REQ_MTX_2_RELEASE); + check_generations(ctx, A_2_0, NONE); + request(ctx, A_2_0, REQ_MTX_1_RELEASE); + check_generations(ctx, A_2_0, NONE); +} + +static void test_deadlock_c11_and_classic(test_context *ctx) +{ + obtain_c11(ctx); + request(ctx, A_1, REQ_MTX_0_OBTAIN); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_C11_OBTAIN); + check_generations(ctx, NONE, NONE); + deadlock_obtain(ctx, MTX_0); + release_c11(ctx); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_C11_RELEASE); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_0_RELEASE); + check_generations(ctx, A_1, NONE); +} + +static void test_deadlock_classic_and_c11(test_context *ctx) +{ + obtain(ctx, MTX_0); + request(ctx, A_1, REQ_MTX_C11_OBTAIN); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_0_OBTAIN); + check_generations(ctx, NONE, NONE); + deadlock_obtain_c11(ctx); + release(ctx, MTX_0); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_0_RELEASE); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_C11_RELEASE); + check_generations(ctx, A_1, NONE); +} + +static void test_deadlock_posix_and_classic(test_context *ctx) +{ +#ifdef RTEMS_POSIX_API + obtain_posix(ctx); + request(ctx, A_1, REQ_MTX_0_OBTAIN); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_POSIX_OBTAIN); + check_generations(ctx, NONE, NONE); + deadlock_obtain(ctx, MTX_0); + 
release_posix(ctx); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_POSIX_RELEASE); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_0_RELEASE); + check_generations(ctx, A_1, NONE); +#endif +} + +static void test_deadlock_classic_and_posix(test_context *ctx) +{ +#ifdef RTEMS_POSIX_API + obtain(ctx, MTX_0); + request(ctx, A_1, REQ_MTX_POSIX_OBTAIN); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_0_OBTAIN); + check_generations(ctx, NONE, NONE); + deadlock_obtain_posix(ctx); + release(ctx, MTX_0); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_0_RELEASE); + check_generations(ctx, A_1, NONE); + request(ctx, A_1, REQ_MTX_POSIX_RELEASE); + check_generations(ctx, A_1, NONE); +#endif +} + +static void tear_down(test_context *ctx) +{ + rtems_status_code sc; + size_t i; + + for (i = 1; i < TASK_COUNT; ++i) { + sc = rtems_task_delete(ctx->tasks[i]); + rtems_test_assert(sc == RTEMS_SUCCESSFUL); + } + + for (i = 0; i < MTX_COUNT; ++i) { + sc = rtems_semaphore_delete(ctx->mtx[i]); + rtems_test_assert(sc == RTEMS_SUCCESSFUL); + } + + mtx_destroy(&ctx->mtx_c11); + +#ifdef RTEMS_POSIX_API + { + int error; + + error = pthread_mutex_destroy(&ctx->mtx_posix); + rtems_test_assert(error == 0); + } +#endif +} - test(); +static void Init(rtems_task_argument arg) +{ + test_context *ctx = &test_instance; + rtems_resource_snapshot snapshot; + TEST_BEGIN(); + rtems_resource_snapshot_take(&snapshot); + + set_up(ctx); + test_inherit(ctx); + test_inherit_fifo_for_equal_priority(ctx); + test_deadlock_two_classic(ctx); + test_deadlock_three_classic(ctx); + test_deadlock_c11_and_classic(ctx); + test_deadlock_classic_and_c11(ctx); + test_deadlock_posix_and_classic(ctx); + test_deadlock_classic_and_posix(ctx); + tear_down(ctx); + + rtems_test_assert(rtems_resource_snapshot_check(&snapshot)); TEST_END(); rtems_test_exit(0); } +static void fatal_extension( + rtems_fatal_source source, + bool is_internal, + rtems_fatal_code error +) +{ 
+ + if ( + source == INTERNAL_ERROR_CORE + && !is_internal + && error == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK + ) { + test_context *ctx = &test_instance; + + longjmp(ctx->deadlock_return_context, 1); + } +} + #define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER #define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER #define CONFIGURE_MAXIMUM_TASKS TASK_COUNT -#define CONFIGURE_MAXIMUM_SEMAPHORES 1 +#define CONFIGURE_MAXIMUM_SEMAPHORES 3 + +#ifdef RTEMS_POSIX_API +#define CONFIGURE_MAXIMUM_POSIX_MUTEXES 1 +#endif -#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION +#define CONFIGURE_INITIAL_EXTENSIONS \ + { .fatal = fatal_extension }, \ + RTEMS_TEST_INITIAL_EXTENSION #define CONFIGURE_INIT_TASK_PRIORITY 3 diff --git a/testsuites/sptests/spmutex01/spmutex01.doc b/testsuites/sptests/spmutex01/spmutex01.doc index 7951024..7bcb850 100644 --- a/testsuites/sptests/spmutex01/spmutex01.doc +++ b/testsuites/sptests/spmutex01/spmutex01.doc @@ -4,6 +4,10 @@ test set name: spmutex01 directives: + - mtx_lock() + - mtx_unlock() + - pthread_mutex_lock() + - pthread_mutex_unlock() - rtems_semaphore_create() - rtems_semaphore_obtain() - rtems_semaphore_release() @@ -12,3 +16,4 @@ concepts: - Ensure that priority inheritance mechanism works. - Ensure that thread priority queueing discipline works. + - Ensure that deadlock detection works in various combinations. diff --git a/testsuites/sptests/spsyslock01/init.c b/testsuites/sptests/spsyslock01/init.c index 5bf5d6a..1e0d4818 100644 --- a/testsuites/sptests/spsyslock01/init.c +++ b/testsuites/sptests/spsyslock01/init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 embedded brains GmbH. All rights reserved. + * Copyright (c) 2015, 2016 embedded brains GmbH. All rights reserved. * * embedded brains GmbH * Dornierstr. 
4 @@ -21,7 +21,7 @@ #include <sys/lock.h> #include <errno.h> #include <limits.h> -#include <pthread.h> +#include <setjmp.h> #include <string.h> #include <time.h> @@ -35,8 +35,6 @@ const char rtems_test_name[] = "SPSYSLOCK 1"; #define EVENT_MTX_PRIO_INV RTEMS_EVENT_2 -#define EVENT_MTX_DEADLOCK RTEMS_EVENT_3 - #define EVENT_REC_MTX_ACQUIRE RTEMS_EVENT_4 #define EVENT_REC_MTX_RELEASE RTEMS_EVENT_5 @@ -56,7 +54,6 @@ typedef struct { rtems_id mid; rtems_id low; struct _Mutex_Control mtx; - struct _Mutex_Control deadlock_mtx; struct _Mutex_recursive_Control rec_mtx; struct _Condition_Control cond; struct _Semaphore_Control sem; @@ -65,6 +62,7 @@ typedef struct { int eno[2]; int generation[2]; int current_generation[2]; + jmp_buf deadlock_return_context; } test_context; static test_context test_instance; @@ -298,6 +296,19 @@ static void test_mtx_timeout_recursive(test_context *ctx) send_event(ctx, idx, EVENT_REC_MTX_RELEASE); } +static void test_mtx_deadlock(test_context *ctx) +{ + struct _Mutex_Control *mtx = &ctx->mtx; + + _Mutex_Acquire(mtx); + + if (setjmp(ctx->deadlock_return_context) == 0) { + _Mutex_Acquire(mtx); + } + + _Mutex_Release(mtx); +} + static void test_condition(test_context *ctx) { struct _Condition_Control *cond = &ctx->cond; @@ -493,21 +504,6 @@ static void mid_task(rtems_task_argument arg) rtems_test_assert(0); } -#ifdef RTEMS_POSIX_API -static void deadlock_cleanup(void *arg) -{ - struct _Mutex_Control *deadlock_mtx = arg; - - /* - * The thread terminate procedure will dequeue us from the wait queue. So, - * one release is sufficient. 
- */ - - _Mutex_Release(deadlock_mtx); - _Mutex_Destroy(deadlock_mtx); -} -#endif - static void high_task(rtems_task_argument idx) { test_context *ctx = &test_instance; @@ -553,22 +549,6 @@ static void high_task(rtems_task_argument idx) rtems_test_assert(sc == RTEMS_SUCCESSFUL); } - if ((events & EVENT_MTX_DEADLOCK) != 0) { - struct _Mutex_Control *deadlock_mtx = &ctx->deadlock_mtx; - -#ifdef RTEMS_POSIX_API - pthread_cleanup_push(deadlock_cleanup, deadlock_mtx); -#endif - - _Mutex_Initialize(deadlock_mtx); - _Mutex_Acquire(deadlock_mtx); - _Mutex_Acquire(deadlock_mtx); - -#ifdef RTEMS_POSIX_API - pthread_cleanup_pop(0); -#endif - } - if ((events & EVENT_REC_MTX_ACQUIRE) != 0) { _Mutex_recursive_Acquire(&ctx->rec_mtx); } @@ -670,6 +650,7 @@ static void test(void) test_prio_inv_recursive(ctx); test_mtx_timeout_normal(ctx); test_mtx_timeout_recursive(ctx); + test_mtx_deadlock(ctx); test_condition(ctx); test_condition_timeout(ctx); test_sem(ctx); @@ -677,15 +658,11 @@ static void test(void) test_futex(ctx); test_sched(); - send_event(ctx, 0, EVENT_MTX_DEADLOCK); - sc = rtems_task_delete(ctx->mid); rtems_test_assert(sc == RTEMS_SUCCESSFUL); -#ifdef RTEMS_POSIX_API sc = rtems_task_delete(ctx->high[0]); rtems_test_assert(sc == RTEMS_SUCCESSFUL); -#endif sc = rtems_task_delete(ctx->high[1]); rtems_test_assert(sc == RTEMS_SUCCESSFUL); @@ -707,6 +684,24 @@ static void Init(rtems_task_argument arg) rtems_test_exit(0); } +static void fatal_extension( + rtems_fatal_source source, + bool is_internal, + rtems_fatal_code error +) +{ + + if ( + source == INTERNAL_ERROR_CORE + && !is_internal + && error == INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK + ) { + test_context *ctx = &test_instance; + + longjmp(ctx->deadlock_return_context, 1); + } +} + #define CONFIGURE_MICROSECONDS_PER_TICK US_PER_TICK #define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER @@ -714,7 +709,9 @@ static void Init(rtems_task_argument arg) #define CONFIGURE_MAXIMUM_TASKS 4 -#define CONFIGURE_INITIAL_EXTENSIONS 
RTEMS_TEST_INITIAL_EXTENSION +#define CONFIGURE_INITIAL_EXTENSIONS \ + { .fatal = fatal_extension }, \ + RTEMS_TEST_INITIAL_EXTENSION #define CONFIGURE_INIT_TASK_PRIORITY 4 #define CONFIGURE_INIT_TASK_INITIAL_MODES RTEMS_DEFAULT_MODES -- 1.8.4.5 _______________________________________________ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel