[PATCH-V2 0/7] Changes to SMP Cache manager patches
Renamed _SMP_Broadcast_message and _SMP_Send_message_cpu_set to _SMP_Send_message_broadcast and _SMP_Send_message_multicast and changed the broadcast type to unsigned long. Broadcast no longer uses multicast. Now supports arbitrarily sized cpu sets. Dropped use of function ids in smp cache manager. Prevent thread dispatch in smp cache manager to avoid same core trying to take the same lock twice. Use TAS-lock instead of ticket lock. If we fail to take the lock, and interrupts are disabled, another core might be waiting for us to service a smp message. So each time we fail we also look at the per cpu message bitmask to see if there is cache smp message for us. Added warning about using the smp cache manager from interrupt context. No extra function for instruction cache invalidation. If RTEMS_SMP is defined and the BSP has defined CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING then the normal instruction cache invalidation operation will invalidate all processors instruction cache. This removes the need to patch _CPU_ISR_install_raw_handler If the state is not up, all cache operations will be local. It is up to each core to flush their caches if needed before starting to service interrupts or tasks. Added an extra error message to differentiate between the main processor not having data cache snooping enabled and a secondary processor not having data cache snooping enabled. 
Daniel Cederman (7): score: Add function to send a SMP message to a set of CPUs score: Rename SMP broadcast message function score: Add SMP support to the cache manager bsp/sparc: Flush icache before first time enabling interrupts score/sparc: Add comment on icache flush after trap table update bsp/sparc: Ensure that data cache snooping is enabled bsp/sparc: Flush only instruction cache c/src/lib/libbsp/shared/include/fatal.h |2 + c/src/lib/libbsp/sparc/leon3/include/cache_.h |7 +- c/src/lib/libbsp/sparc/leon3/include/leon.h | 11 ++ c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c | 17 ++- c/src/lib/libcpu/shared/src/cache_manager.c | 200 - cpukit/rtems/include/rtems/rtems/cache.h | 88 +++ cpukit/score/cpu/sparc/cpu.c | 12 +- cpukit/score/cpu/sparc/rtems/score/cpu.h |4 + cpukit/score/include/rtems/score/smpimpl.h| 32 +++- cpukit/score/src/smp.c| 18 ++- 10 files changed, 376 insertions(+), 15 deletions(-) -- 1.7.9.5 ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
[PATCH-V2 6/7] bsp/sparc: Ensure that data cache snooping is enabled
Check that data cache snooping exists and is enabled on all cores. --- c/src/lib/libbsp/shared/include/fatal.h |2 ++ c/src/lib/libbsp/sparc/leon3/include/leon.h | 10 ++ c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c |8 ++-- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/c/src/lib/libbsp/shared/include/fatal.h b/c/src/lib/libbsp/shared/include/fatal.h index e928bba..99da207 100644 --- a/c/src/lib/libbsp/shared/include/fatal.h +++ b/c/src/lib/libbsp/shared/include/fatal.h @@ -49,6 +49,8 @@ typedef enum { /* LEON3 fatal codes */ LEON3_FATAL_NO_IRQMP_CONTROLLER = BSP_FATAL_CODE_BLOCK(2), LEON3_FATAL_CONSOLE_REGISTER_DEV, + LEON3_FATAL_INVALID_CACHE_CONFIG_MAIN_PROCESSOR, + LEON3_FATAL_INVALID_CACHE_CONFIG_SECONDARY_PROCESSOR, /* LPC24XX fatal codes */ LPC24XX_FATAL_PL111_SET_UP = BSP_FATAL_CODE_BLOCK(3), diff --git a/c/src/lib/libbsp/sparc/leon3/include/leon.h b/c/src/lib/libbsp/sparc/leon3/include/leon.h index d7048f3..a62ad29 100644 --- a/c/src/lib/libbsp/sparc/leon3/include/leon.h +++ b/c/src/lib/libbsp/sparc/leon3/include/leon.h @@ -86,6 +86,11 @@ extern "C" { #define LEON_REG_TIMER_CONTROL_LD0x0004 /* 1 = load counter */ /* 0 = no function */ +/* + * The following defines the bits in the LEON Cache Control Register. 
+ */ +#define LEON3_REG_CACHE_CTRL_DS 0x0080 /* Data cache snooping */ + /* LEON3 Interrupt Controller */ extern volatile struct irqmp_regs *LEON3_IrqCtrl_Regs; /* LEON3 GP Timer */ @@ -347,6 +352,11 @@ static inline uint32_t leon3_get_cache_control_register(void) return leon3_get_system_register(0x0); } +static inline bool leon3_data_cache_snooping_enabled(void) +{ + return leon3_get_cache_control_register() & LEON3_REG_CACHE_CTRL_DS; +} + static inline uint32_t leon3_get_inst_cache_config_register(void) { return leon3_get_system_register(0x8); diff --git a/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c b/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c index 9166ad5..312488d 100644 --- a/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c +++ b/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -39,7 +40,9 @@ void bsp_start_on_secondary_processor() { uint32_t cpu_index_self = _CPU_SMP_Get_current_processor(); - leon3_set_cache_control_register(0x8F); + if ( ! leon3_data_cache_snooping_enabled() ) +BSP_fatal_exit( LEON3_FATAL_INVALID_CACHE_CONFIG_SECONDARY_PROCESSOR ); + /* Unmask IPI interrupts at Interrupt controller for this CPU */ LEON3_IrqCtrl_Regs->mask[cpu_index_self] |= 1U << LEON3_MP_IRQ; @@ -48,7 +51,8 @@ void bsp_start_on_secondary_processor() uint32_t _CPU_SMP_Initialize( void ) { - leon3_set_cache_control_register(0x8F); + if ( ! leon3_data_cache_snooping_enabled() ) +bsp_fatal( LEON3_FATAL_INVALID_CACHE_CONFIG_MAIN_PROCESSOR ); if ( rtems_configuration_get_maximum_processors() > 1 ) { LEON_Unmask_interrupt(LEON3_MP_IRQ); -- 1.7.9.5 ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
[PATCH-V2 1/7] score: Add function to send an SMP message to a set of CPUs
--- cpukit/score/include/rtems/score/smpimpl.h | 15 +++ cpukit/score/src/smp.c | 16 2 files changed, 31 insertions(+) diff --git a/cpukit/score/include/rtems/score/smpimpl.h b/cpukit/score/include/rtems/score/smpimpl.h index e2fee39..d49f88f 100644 --- a/cpukit/score/include/rtems/score/smpimpl.h +++ b/cpukit/score/include/rtems/score/smpimpl.h @@ -175,6 +175,21 @@ void _SMP_Broadcast_message( uint32_t message ); +/** + * @brief Sends a SMP message to a set of processors. + * + * The sending processor may be part of the set. + * + * @param[in] setsize The size of the set of target processors of the message. + * @param[in] cpus The set of target processors of the message. + * @param[in] message The message. + */ +void _SMP_Send_message_multicast( + const size_t setsize, + const cpu_set_t *cpus, + unsigned long message +); + #endif /* defined( RTEMS_SMP ) */ /** diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c index f0554fe..7140664 100644 --- a/cpukit/score/src/smp.c +++ b/cpukit/score/src/smp.c @@ -177,4 +177,20 @@ void _SMP_Broadcast_message( uint32_t message ) } } +void _SMP_Send_message_multicast( +const size_t setsize, +const cpu_set_t *cpus, +unsigned long message +) +{ + uint32_t cpu_count = _SMP_Get_processor_count(); + uint32_t cpu_index; + + for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) { +if ( CPU_ISSET_S( cpu_index, setsize, cpus ) ) { + _SMP_Send_message( cpu_index, message ); +} + } +} + SMP_Test_message_handler _SMP_Test_message_handler; -- 1.7.9.5 ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
[PATCH-V2 7/7] bsp/sparc: Flush only instruction cache
The flush instruction on LEON flushes both the data and the instruction cache. Flushing of just the instruction cache can be done by setting the "flush instruction cache" bit in the cache control register. --- c/src/lib/libbsp/sparc/leon3/include/cache_.h |5 - c/src/lib/libbsp/sparc/leon3/include/leon.h |1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/c/src/lib/libbsp/sparc/leon3/include/cache_.h b/c/src/lib/libbsp/sparc/leon3/include/cache_.h index 63790c1..ced5b6d 100644 --- a/c/src/lib/libbsp/sparc/leon3/include/cache_.h +++ b/c/src/lib/libbsp/sparc/leon3/include/cache_.h @@ -136,7 +136,10 @@ static inline void _CPU_cache_unfreeze_data(void) static inline void _CPU_cache_invalidate_entire_instruction(void) { - __asm__ volatile ("flush"); + uint32_t cache_reg = leon3_get_cache_control_register(); + + cache_reg |= LEON3_REG_CACHE_CTRL_FI; + leon3_set_cache_control_register(cache_reg); } static inline void _CPU_cache_invalidate_instruction_range( diff --git a/c/src/lib/libbsp/sparc/leon3/include/leon.h b/c/src/lib/libbsp/sparc/leon3/include/leon.h index a62ad29..bc3cdde 100644 --- a/c/src/lib/libbsp/sparc/leon3/include/leon.h +++ b/c/src/lib/libbsp/sparc/leon3/include/leon.h @@ -90,6 +90,7 @@ extern "C" { * The following defines the bits in the LEON Cache Control Register. */ #define LEON3_REG_CACHE_CTRL_DS 0x0080 /* Data cache snooping */ +#define LEON3_REG_CACHE_CTRL_FI 0x0020 /* Flush instruction cache */ /* LEON3 Interrupt Controller */ extern volatile struct irqmp_regs *LEON3_IrqCtrl_Regs; -- 1.7.9.5 ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
[PATCH-V2 5/7] score/sparc: Add comment on icache flush after trap table update
Changes to the trap table might be missed by other cores. If the system state is up, the other cores can be notified using SMP messages that they need to flush their icache. If the up state has not been reached there is no need to notify other cores. They will do an automatic flush of the icache just after entering the up state, but before enabling interrupts. --- cpukit/score/cpu/sparc/cpu.c | 12 +--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/cpukit/score/cpu/sparc/cpu.c b/cpukit/score/cpu/sparc/cpu.c index c616de4..88228b7 100644 --- a/cpukit/score/cpu/sparc/cpu.c +++ b/cpukit/score/cpu/sparc/cpu.c @@ -210,10 +210,16 @@ void _CPU_ISR_install_raw_handler( (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT; slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK); - /* need to flush icache after this !!! */ - + /* + * Changes to the trap table might be missed by other cores. + * If the system state is up, the other cores can be notified + * using SMP messages that they need to flush their icache. + * If the up state has not been reached there is no need to + * notify other cores. They will do an automatic flush of the + * icache just after entering the up state, but before enabling + * interrupts. + */ rtems_cache_invalidate_entire_instruction(); - } void _CPU_ISR_install_vector( -- 1.7.9.5 ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
[PATCH-V2 2/7] score: Rename SMP broadcast message function
Change message type to unsigned long to match other SMP message functions. --- cpukit/score/include/rtems/score/smpimpl.h |4 ++-- cpukit/score/src/smp.c |2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cpukit/score/include/rtems/score/smpimpl.h b/cpukit/score/include/rtems/score/smpimpl.h index d49f88f..cbc6428 100644 --- a/cpukit/score/include/rtems/score/smpimpl.h +++ b/cpukit/score/include/rtems/score/smpimpl.h @@ -171,8 +171,8 @@ void _SMP_Send_message( uint32_t cpu_index, unsigned long message ); * * @param [in] message is message to send */ -void _SMP_Broadcast_message( - uint32_t message +void _SMP_Send_message_broadcast( + unsigned long message ); /** diff --git a/cpukit/score/src/smp.c b/cpukit/score/src/smp.c index 7140664..09246e3 100644 --- a/cpukit/score/src/smp.c +++ b/cpukit/score/src/smp.c @@ -162,7 +162,7 @@ void _SMP_Send_message( uint32_t cpu_index, unsigned long message ) _CPU_SMP_Send_interrupt( cpu_index ); } -void _SMP_Broadcast_message( uint32_t message ) +void _SMP_Send_message_broadcast( unsigned long message ) { uint32_t cpu_count = _SMP_Get_processor_count(); uint32_t cpu_index_self = _SMP_Get_current_processor(); -- 1.7.9.5 ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
[PATCH-V2 3/7] score: Add SMP support to the cache manager
Adds functions that allows the user to specify which cores that should perform the cache operation. SMP messages are sent to all the specified cores and the caller waits until all cores have acknowledged that they have flushed their cache. If CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING is defined the instruction cache invalidation function will perform the operation on all cores using the previous method. --- c/src/lib/libbsp/sparc/leon3/include/cache_.h |2 + c/src/lib/libcpu/shared/src/cache_manager.c | 200 - cpukit/rtems/include/rtems/rtems/cache.h | 88 +++ cpukit/score/include/rtems/score/smpimpl.h| 13 ++ 4 files changed, 297 insertions(+), 6 deletions(-) diff --git a/c/src/lib/libbsp/sparc/leon3/include/cache_.h b/c/src/lib/libbsp/sparc/leon3/include/cache_.h index 70c1e2c..63790c1 100644 --- a/c/src/lib/libbsp/sparc/leon3/include/cache_.h +++ b/c/src/lib/libbsp/sparc/leon3/include/cache_.h @@ -26,6 +26,8 @@ extern "C" { #define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS +#define CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING + #define CPU_INSTRUCTION_CACHE_ALIGNMENT 64 #define CPU_DATA_CACHE_ALIGNMENT 64 diff --git a/c/src/lib/libcpu/shared/src/cache_manager.c b/c/src/lib/libcpu/shared/src/cache_manager.c index 420a013..da57c12 100644 --- a/c/src/lib/libcpu/shared/src/cache_manager.c +++ b/c/src/lib/libcpu/shared/src/cache_manager.c @@ -37,6 +37,156 @@ #include #include "cache_.h" +#include +#include +#include + +#if defined( RTEMS_SMP ) + +typedef void (*Cache_manager_Function_ptr)(const void *d_addr, size_t n_bytes); + +typedef struct { + Atomic_Flag lock; + Cache_manager_Function_ptr func; + Atomic_Uint count; + const void *addr; + size_t size; +} Cache_manager_SMP_control; + +static Cache_manager_SMP_control _CM_SMP = { + .lock = ATOMIC_INITIALIZER_FLAG, + .count = ATOMIC_INITIALIZER_UINT(0) +}; + +void +_SMP_Cache_manager_message_handler(void) +{ + _CM_SMP.func( _CM_SMP.addr, _CM_SMP.size ); + _Atomic_Fetch_add_uint( &_CM_SMP.count, 1, ATOMIC_ORDER_RELEASE ); 
+} + +#if defined(CPU_DATA_CACHE_ALIGNMENT) || \ +(defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) && \ +defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)) + +static void +_cache_manager_process_cache_messages( void ) +{ + unsigned long message; + Per_CPU_Control *cpu_self = _Per_CPU_Get(); + + message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED ); + + if ( message & SMP_MESSAGE_CACHE_MANAGER) { +if ( _Atomic_Compare_exchange_ulong( &cpu_self->message, &message, +message & ~SMP_MESSAGE_CACHE_MANAGER, ATOMIC_ORDER_RELAXED, +ATOMIC_ORDER_RELAXED ) ) { + _SMP_Cache_manager_message_handler(); +} + } +} + +static void +_cache_manager_send_smp_msg( +const size_t setsize, +const cpu_set_t *set, +Cache_manager_Function_ptr func, +const void * addr, +size_t size + ) +{ + uint32_t cpu_count = 0; + + if ( ! _System_state_Is_up( _System_state_Get() ) ) { +func( addr, size ); +return; + } + + if ( set == NULL ) +cpu_count = _SMP_Get_processor_count(); + else +cpu_count = CPU_COUNT_S( setsize, set ); + + _Thread_Disable_dispatch(); + + while ( _Atomic_Flag_test_and_set( &_CM_SMP.lock, ATOMIC_ORDER_ACQUIRE ) ) +_cache_manager_process_cache_messages(); + + _CM_SMP.func = func; + _CM_SMP.addr = addr; + _CM_SMP.size = size; + _Atomic_Store_uint( &_CM_SMP.count, 0, ATOMIC_ORDER_RELEASE ); + _Atomic_Fence( ATOMIC_ORDER_RELEASE ); + + if ( set == NULL ) { +_SMP_Send_message_broadcast( SMP_MESSAGE_CACHE_MANAGER ); +_SMP_Cache_manager_message_handler(); + } else { +_SMP_Send_message_multicast( setsize, set, SMP_MESSAGE_CACHE_MANAGER ); +_cache_manager_process_cache_messages(); + } + + while ( _Atomic_Load_uint( &_CM_SMP.count, ATOMIC_ORDER_ACQUIRE ) + != cpu_count ); + + _Atomic_Flag_clear( &_CM_SMP.lock, ATOMIC_ORDER_RELEASE ); + + _Thread_Enable_dispatch(); +} +#endif + +void +rtems_cache_flush_multiple_data_lines_processor_set( + const void *addr, + size_t size, + const size_t setsize, + const cpu_set_t *set +) +{ +#if defined(CPU_DATA_CACHE_ALIGNMENT) + 
_cache_manager_send_smp_msg( setsize, set, + rtems_cache_flush_multiple_data_lines, addr, size ); +#endif +} + +void +rtems_cache_invalidate_multiple_data_lines_processor_set( + const void *addr, + size_t size, + const size_t setsize, + const cpu_set_t *set +) +{ +#if defined(CPU_DATA_CACHE_ALIGNMENT) + _cache_manager_send_smp_msg( setsize, set, + rtems_cache_invalidate_multiple_data_lines, addr, size ); +#endif +} + +void +rtems_cache_flush_entire_data_processor_set( + const size_t setsize, + const cpu_set_t *set +) +{ +#if defined(CPU_DATA_CACHE_ALIGNMENT) + _cache_manager_send_smp_msg( setsize, set, + (Cache_manager_Function_ptr)rtems_cache_flush_entire_data, 0, 0 ); +#endif +} + +void +rtems_cache_invalidate_entire_data_processor_set( + const size_t setsize, + const cp
[PATCH-V2 4/7] bsp/sparc: Flush icache before first time enabling interrupts
A secondary processor might miss changes done to the trap table if the instruction cache is not flushed. Once interrupts are enabled any other required cache flushes can be ordered via the cache manager. --- c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c |9 + cpukit/score/cpu/sparc/rtems/score/cpu.h |4 2 files changed, 13 insertions(+) diff --git a/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c b/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c index 567eecc..9166ad5 100644 --- a/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c +++ b/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -80,3 +81,11 @@ void _CPU_SMP_Send_interrupt(uint32_t target_processor_index) /* send interrupt to destination CPU */ LEON3_IrqCtrl_Regs->force[target_processor_index] = 1 << LEON3_MP_IRQ; } + +void _BSP_Start_multitasking( + Context_Control *heir +) +{ + _CPU_cache_invalidate_entire_instruction(); + _CPU_Context_Restart_self( heir ); +} diff --git a/cpukit/score/cpu/sparc/rtems/score/cpu.h b/cpukit/score/cpu/sparc/rtems/score/cpu.h index c010005..4a08441 100644 --- a/cpukit/score/cpu/sparc/rtems/score/cpu.h +++ b/cpukit/score/cpu/sparc/rtems/score/cpu.h @@ -1203,6 +1203,10 @@ register struct Per_CPU_Control *_SPARC_Per_CPU_current __asm__( "g6" ); void _CPU_SMP_Send_interrupt( uint32_t target_processor_index ); + void _BSP_Start_multitasking( Context_Control *heir ) +RTEMS_COMPILER_NO_RETURN_ATTRIBUTE; + #define _CPU_Start_multitasking _BSP_Start_multitasking + static inline void _CPU_SMP_Processor_event_broadcast( void ) { __asm__ volatile ( "" : : : "memory" ); -- 1.7.9.5 ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
Re: [PATCH-V2 6/7] bsp/sparc: Ensure that data cache snooping is enabled
On 2014-07-09 09:02, Daniel Cederman wrote: Check that data cache snooping exists and is enabled on all cores. --- c/src/lib/libbsp/shared/include/fatal.h |2 ++ c/src/lib/libbsp/sparc/leon3/include/leon.h | 10 ++ c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c |8 ++-- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/c/src/lib/libbsp/shared/include/fatal.h b/c/src/lib/libbsp/shared/include/fatal.h index e928bba..99da207 100644 --- a/c/src/lib/libbsp/shared/include/fatal.h +++ b/c/src/lib/libbsp/shared/include/fatal.h @@ -49,6 +49,8 @@ typedef enum { /* LEON3 fatal codes */ LEON3_FATAL_NO_IRQMP_CONTROLLER = BSP_FATAL_CODE_BLOCK(2), LEON3_FATAL_CONSOLE_REGISTER_DEV, + LEON3_FATAL_INVALID_CACHE_CONFIG_MAIN_PROCESSOR, + LEON3_FATAL_INVALID_CACHE_CONFIG_SECONDARY_PROCESSOR, /* LPC24XX fatal codes */ LPC24XX_FATAL_PL111_SET_UP = BSP_FATAL_CODE_BLOCK(3), diff --git a/c/src/lib/libbsp/sparc/leon3/include/leon.h b/c/src/lib/libbsp/sparc/leon3/include/leon.h index d7048f3..a62ad29 100644 --- a/c/src/lib/libbsp/sparc/leon3/include/leon.h +++ b/c/src/lib/libbsp/sparc/leon3/include/leon.h @@ -86,6 +86,11 @@ extern "C" { #define LEON_REG_TIMER_CONTROL_LD0x0004 /* 1 = load counter */ /* 0 = no function */ +/* + * The following defines the bits in the LEON Cache Control Register. 
+ */ +#define LEON3_REG_CACHE_CTRL_DS 0x0080 /* Data cache snooping */ + /* LEON3 Interrupt Controller */ extern volatile struct irqmp_regs *LEON3_IrqCtrl_Regs; /* LEON3 GP Timer */ @@ -347,6 +352,11 @@ static inline uint32_t leon3_get_cache_control_register(void) return leon3_get_system_register(0x0); } +static inline bool leon3_data_cache_snooping_enabled(void) +{ + return leon3_get_cache_control_register() & LEON3_REG_CACHE_CTRL_DS; +} + static inline uint32_t leon3_get_inst_cache_config_register(void) { return leon3_get_system_register(0x8); diff --git a/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c b/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c index 9166ad5..312488d 100644 --- a/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c +++ b/c/src/lib/libbsp/sparc/leon3/startup/bspsmp.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -39,7 +40,9 @@ void bsp_start_on_secondary_processor() { uint32_t cpu_index_self = _CPU_SMP_Get_current_processor(); - leon3_set_cache_control_register(0x8F); + if ( ! leon3_data_cache_snooping_enabled() ) +BSP_fatal_exit( LEON3_FATAL_INVALID_CACHE_CONFIG_SECONDARY_PROCESSOR ); bsp_fatal()? + /* Unmask IPI interrupts at Interrupt controller for this CPU */ LEON3_IrqCtrl_Regs->mask[cpu_index_self] |= 1U << LEON3_MP_IRQ; @@ -48,7 +51,8 @@ void bsp_start_on_secondary_processor() uint32_t _CPU_SMP_Initialize( void ) { - leon3_set_cache_control_register(0x8F); + if ( ! leon3_data_cache_snooping_enabled() ) +bsp_fatal( LEON3_FATAL_INVALID_CACHE_CONFIG_MAIN_PROCESSOR ); if ( rtems_configuration_get_maximum_processors() > 1 ) { LEON_Unmask_interrupt(LEON3_MP_IRQ); -- Sebastian Huber, embedded brains GmbH Address : Dornierstr. 4, D-82178 Puchheim, Germany Phone : +49 89 189 47 41-16 Fax : +49 89 189 47 41-09 E-Mail : sebastian.hu...@embedded-brains.de PGP : Public key available on request. Diese Nachricht ist keine geschäftliche Mitteilung im Sinne des EHUG. 
___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
Re: [PATCH-V2 3/7] score: Add SMP support to the cache manager
The new cache manager functions should have tests, see also http://git.rtems.org/rtems/tree/testsuites/sptests/spcache01/init.c On 2014-07-09 09:02, Daniel Cederman wrote: Adds functions that allows the user to specify which cores that should perform the cache operation. SMP messages are sent to all the specified cores and the caller waits until all cores have acknowledged that they have flushed their cache. If CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING is defined the instruction cache invalidation function will perform the operation on all cores using the previous method. --- c/src/lib/libbsp/sparc/leon3/include/cache_.h |2 + c/src/lib/libcpu/shared/src/cache_manager.c | 200 - cpukit/rtems/include/rtems/rtems/cache.h | 88 +++ cpukit/score/include/rtems/score/smpimpl.h| 13 ++ 4 files changed, 297 insertions(+), 6 deletions(-) diff --git a/c/src/lib/libbsp/sparc/leon3/include/cache_.h b/c/src/lib/libbsp/sparc/leon3/include/cache_.h index 70c1e2c..63790c1 100644 --- a/c/src/lib/libbsp/sparc/leon3/include/cache_.h +++ b/c/src/lib/libbsp/sparc/leon3/include/cache_.h @@ -26,6 +26,8 @@ extern "C" { #define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS +#define CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING + #define CPU_INSTRUCTION_CACHE_ALIGNMENT 64 #define CPU_DATA_CACHE_ALIGNMENT 64 diff --git a/c/src/lib/libcpu/shared/src/cache_manager.c b/c/src/lib/libcpu/shared/src/cache_manager.c index 420a013..da57c12 100644 --- a/c/src/lib/libcpu/shared/src/cache_manager.c +++ b/c/src/lib/libcpu/shared/src/cache_manager.c This file should follow the coding and naming conventions: http://www.rtems.org/wiki/index.php/Coding_Conventions @@ -37,6 +37,156 @@ #include #include "cache_.h" +#include +#include +#include + +#if defined( RTEMS_SMP ) + +typedef void (*Cache_manager_Function_ptr)(const void *d_addr, size_t n_bytes); + +typedef struct { + Atomic_Flag lock; + Cache_manager_Function_ptr func; + Atomic_Uint count; + const void *addr; + size_t size; +} Cache_manager_SMP_control; + 
+static Cache_manager_SMP_control _CM_SMP = { + .lock = ATOMIC_INITIALIZER_FLAG, + .count = ATOMIC_INITIALIZER_UINT(0) +}; + +void +_SMP_Cache_manager_message_handler(void) +{ + _CM_SMP.func( _CM_SMP.addr, _CM_SMP.size ); + _Atomic_Fetch_add_uint( &_CM_SMP.count, 1, ATOMIC_ORDER_RELEASE ); +} + +#if defined(CPU_DATA_CACHE_ALIGNMENT) || \ +(defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) && \ +defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)) + +static void +_cache_manager_process_cache_messages( void ) +{ + unsigned long message; + Per_CPU_Control *cpu_self = _Per_CPU_Get(); + + message = _Atomic_Load_ulong( &cpu_self->message, ATOMIC_ORDER_RELAXED ); + + if ( message & SMP_MESSAGE_CACHE_MANAGER) { +if ( _Atomic_Compare_exchange_ulong( &cpu_self->message, &message, +message & ~SMP_MESSAGE_CACHE_MANAGER, ATOMIC_ORDER_RELAXED, +ATOMIC_ORDER_RELAXED ) ) { + _SMP_Cache_manager_message_handler(); +} + } +} + +static void +_cache_manager_send_smp_msg( +const size_t setsize, +const cpu_set_t *set, +Cache_manager_Function_ptr func, +const void * addr, +size_t size + ) +{ + uint32_t cpu_count = 0; + + if ( ! _System_state_Is_up( _System_state_Get() ) ) { +func( addr, size ); +return; + } + + if ( set == NULL ) +cpu_count = _SMP_Get_processor_count(); + else +cpu_count = CPU_COUNT_S( setsize, set ); + + _Thread_Disable_dispatch(); This will not work since _Thread_Disable_dispatch() obtains the Giant lock. Other processors acquiring the Giant lock will do this with interrupts disabled, thus you cannot make progress ... 
+ + while ( _Atomic_Flag_test_and_set( &_CM_SMP.lock, ATOMIC_ORDER_ACQUIRE ) ) +_cache_manager_process_cache_messages(); + + _CM_SMP.func = func; + _CM_SMP.addr = addr; + _CM_SMP.size = size; + _Atomic_Store_uint( &_CM_SMP.count, 0, ATOMIC_ORDER_RELEASE ); + _Atomic_Fence( ATOMIC_ORDER_RELEASE ); + + if ( set == NULL ) { +_SMP_Send_message_broadcast( SMP_MESSAGE_CACHE_MANAGER ); +_SMP_Cache_manager_message_handler(); + } else { +_SMP_Send_message_multicast( setsize, set, SMP_MESSAGE_CACHE_MANAGER ); +_cache_manager_process_cache_messages(); + } + + while ( _Atomic_Load_uint( &_CM_SMP.count, ATOMIC_ORDER_ACQUIRE ) + != cpu_count ); ... here. + + _Atomic_Flag_clear( &_CM_SMP.lock, ATOMIC_ORDER_RELEASE ); + + _Thread_Enable_dispatch(); +} +#endif + +void +rtems_cache_flush_multiple_data_lines_processor_set( + const void *addr, + size_t size, + const size_t setsize, + const cpu_set_t *set +) +{ +#if defined(CPU_DATA_CACHE_ALIGNMENT) + _cache_manager_send_smp_msg( setsize, set, + rtems_cache_flush_multiple_data_lines, addr, size ); +#endif +} + +void +rtems_cache_invalidate_multiple_data_lines_processor_set( + const void *addr, + size_t size, + const size_t setsize, + const cpu_set_t *set +) +{ +#if defined(CPU_DATA_
Re: [PATCH 4/6] spintrcritical20: Fix incorrect assumption
On 2014-07-08 22:52, Joel Sherrill wrote: The test assumed that the thread would have enough time to block and become enqueued. In fact, the thread would still be in the ready state and not blocked on the semaphore. Thus the state of the Wait sub-structure in the TCB would not be in the expected state. The simple solution was to continue when the thread was in the ready state. --- testsuites/sptests/spintrcritical20/init.c |4 1 files changed, 4 insertions(+), 0 deletions(-) diff --git a/testsuites/sptests/spintrcritical20/init.c b/testsuites/sptests/spintrcritical20/init.c index cae8fdb..209c9e5 100644 --- a/testsuites/sptests/spintrcritical20/init.c +++ b/testsuites/sptests/spintrcritical20/init.c @@ -20,6 +20,7 @@ #include #include #include +#include const char rtems_test_name[] = "SPINTRCRITICAL 20"; @@ -108,6 +109,9 @@ static void Init(rtems_task_argument ignored) ++resets; } +if (ctx->semaphore_task_tcb->current_state == STATES_READY) + continue; In case the semaphore task is ready at this point, then we have a massive problem since the semaphore task has a higher priority. + _Thread_Disable_dispatch(); Should this check move to here? rtems_test_assert( -- Sebastian Huber, embedded brains GmbH Address : Dornierstr. 4, D-82178 Puchheim, Germany Phone : +49 89 189 47 41-16 Fax : +49 89 189 47 41-09 E-Mail : sebastian.hu...@embedded-brains.de PGP : Public key available on request. Diese Nachricht ist keine geschäftliche Mitteilung im Sinne des EHUG. ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
Re: [PATCH-V2 5/7] score/sparc: Add comment on icache flush after trap table update
On Wed, Jul 9, 2014 at 3:02 AM, Daniel Cederman wrote: > Changes to the trap table might be missed by other cores. > If the system state is up, the other cores can be notified > using SMP messages that they need to flush their icache. > If the up state has not been reached there is no need to > notify other cores. They will do an automatic flush of the > icache just after entering the up state, but before enabling > interrupts. > --- > cpukit/score/cpu/sparc/cpu.c | 12 +--- > 1 file changed, 9 insertions(+), 3 deletions(-) > > diff --git a/cpukit/score/cpu/sparc/cpu.c b/cpukit/score/cpu/sparc/cpu.c > index c616de4..88228b7 100644 > --- a/cpukit/score/cpu/sparc/cpu.c > +++ b/cpukit/score/cpu/sparc/cpu.c > @@ -210,10 +210,16 @@ void _CPU_ISR_install_raw_handler( > (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT; >slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK); > > - /* need to flush icache after this !!! */ > - > + /* > + * Changes to the trap table might be missed by other cores. > + * If the system state is up, the other cores can be notified > + * using SMP messages that they need to flush their icache. > + * If the up state has not been reached there is no need to > + * notify other cores. They will do an automatic flush of the > + * icache just after entering the up state, but before enabling > + * interrupts. > + */ This was needed for UP mode also, since stores to the trap table are cached in d-cache instead of i-cache, you need to flush/invalidate i-cache so the updated trap table entry will be loaded from memory. It should be made clear that this is the situation, else someone might think to make this invalidate conditional on SMP mode... -Gedare >rtems_cache_invalidate_entire_instruction(); > - > } > > void _CPU_ISR_install_vector( > -- > 1.7.9.5 > > ___ > devel mailing list > devel@rtems.org > http://lists.rtems.org/mailman/listinfo/devel ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
Re: [PATCH-V2 7/7] bsp/sparc: Flush only instruction cache
On Wed, Jul 9, 2014 at 3:02 AM, Daniel Cederman wrote: > The flush instruction on LEON flushes both the data and the instruction > cache. Flushing of just the instruction cache can be done by setting > the "flush instruction cache" bit in the cache control register. > --- > c/src/lib/libbsp/sparc/leon3/include/cache_.h |5 - > c/src/lib/libbsp/sparc/leon3/include/leon.h |1 + > 2 files changed, 5 insertions(+), 1 deletion(-) > > diff --git a/c/src/lib/libbsp/sparc/leon3/include/cache_.h > b/c/src/lib/libbsp/sparc/leon3/include/cache_.h > index 63790c1..ced5b6d 100644 > --- a/c/src/lib/libbsp/sparc/leon3/include/cache_.h > +++ b/c/src/lib/libbsp/sparc/leon3/include/cache_.h > @@ -136,7 +136,10 @@ static inline void _CPU_cache_unfreeze_data(void) > > static inline void _CPU_cache_invalidate_entire_instruction(void) > { > - __asm__ volatile ("flush"); > + uint32_t cache_reg = leon3_get_cache_control_register(); > + > + cache_reg |= LEON3_REG_CACHE_CTRL_FI; > + leon3_set_cache_control_register(cache_reg); > } Now you should also flush the d-cache explicitly for the case of updating the trap table, as I mentioned in my previous email the store to tbr[] can get cached in d-cache, so if we don't flush d-cache to memory and it is write-back cache, there could be a problem. (I don't know whether sparc32 cache are write-back or write-thru.) -Gedare > > static inline void _CPU_cache_invalidate_instruction_range( > diff --git a/c/src/lib/libbsp/sparc/leon3/include/leon.h > b/c/src/lib/libbsp/sparc/leon3/include/leon.h > index a62ad29..bc3cdde 100644 > --- a/c/src/lib/libbsp/sparc/leon3/include/leon.h > +++ b/c/src/lib/libbsp/sparc/leon3/include/leon.h > @@ -90,6 +90,7 @@ extern "C" { > * The following defines the bits in the LEON Cache Control Register. 
> */ > #define LEON3_REG_CACHE_CTRL_DS 0x0080 /* Data cache snooping */ > +#define LEON3_REG_CACHE_CTRL_FI 0x0020 /* Flush instruction cache */ > > /* LEON3 Interrupt Controller */ > extern volatile struct irqmp_regs *LEON3_IrqCtrl_Regs; > -- > 1.7.9.5 > > ___ > devel mailing list > devel@rtems.org > http://lists.rtems.org/mailman/listinfo/devel ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
Re: [PATCH 4/6] spintrcritical20: Fix incorrect assumption
I think this patch is wrong. Subsequent changes to the thread queue after I posted this resulted in the test breaking again. What is the test trying to do? I think sometimes through the loop, the condition the test expects is not occurring and it fails. On Jul 9, 2014 9:32 AM, Sebastian Huber wrote: On 2014-07-08 22:52, Joel Sherrill wrote: > The test assumed that the thread would have enough time to block > and become enqueued. In fact, the thread would still be in the > ready state and not blocked on the semaphore. Thus the state > of the Wait sub-structure in the TCB would not be in the expected > state. The simple solution was to continue when the thread was > in the ready state. > --- > testsuites/sptests/spintrcritical20/init.c |4 > 1 files changed, 4 insertions(+), 0 deletions(-) > > diff --git a/testsuites/sptests/spintrcritical20/init.c > b/testsuites/sptests/spintrcritical20/init.c > index cae8fdb..209c9e5 100644 > --- a/testsuites/sptests/spintrcritical20/init.c > +++ b/testsuites/sptests/spintrcritical20/init.c > @@ -20,6 +20,7 @@ > #include > #include > #include > +#include > > const char rtems_test_name[] = "SPINTRCRITICAL 20"; > > @@ -108,6 +109,9 @@ static void Init(rtems_task_argument ignored) > ++resets; > } > > +if (ctx->semaphore_task_tcb->current_state == STATES_READY) > + continue; In case the semaphore task is ready at this point, then we have a massive problem since the semaphore task has a higher priority. > + > _Thread_Disable_dispatch(); Should this check move to here? > > rtems_test_assert( > -- Sebastian Huber, embedded brains GmbH Address : Dornierstr. 4, D-82178 Puchheim, Germany Phone : +49 89 189 47 41-16 Fax : +49 89 189 47 41-09 E-Mail : sebastian.hu...@embedded-brains.de PGP : Public key available on request. Diese Nachricht ist keine geschäftliche Mitteilung im Sinne des EHUG. ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
Re: Adding capture support to score
Hi, On Tue, Jul 8, 2014 at 3:19 PM, Jennifer Averett wrote: > The attached patches are a starting point for discussions for adding capture > support > to core objects. We started to write notes based on our discussions but the > text > was harder to follow than just writing some code and commenting on it. It is > hard > to see the impact of "word changes" versus real changes. > Is this related to capture engine? if not, there needs to be some better terminology to avoid confusion. I find it easier to comment on patches that are sent "in-line", see instructions on the wiki Git page about using git-send-email. > The initial cut has one method call per Score Handler with an enumerated > parameter indicating points of interest. Most take a thread pointer and > an object id as arguments which can be interpreted based on the > event indicated by the enumerated first argument. > Why have separate event sets (types) for mutex and semaphore? (I will say "event set" to mean the points of interest associated with a class of objects. Thus, your first patch includes 3 event sets, one for generic Object, one for Mutex, and one for Semaphore.) Most all locks will have the same kinds of abstract events that affect them. > Some of the items we would like to discuss: > > 1) Should the number of methods be expanded to one method per point of > interest? > This might make sense, since probably the approach taken requires the one function handler per event set to determine the source of the event anyways, so they'll all probably just be implemented as a giant switch statement. It makes sense to me that there is a specific function invoked for each kind of event that occurs, and it could help with debugging since a backtrace makes more sense, and inlining can be done more aggressively with finer-grained functions. gcc can inline function pointers under certain conditions, although I forget what they are off-hand. 
I would choose a generic template for the function pointer though, something like... typedef void (*_Capture_Event_handler)(_Capture_Event e, Objects_Id Object, Thread_Control *thread); Or something similar, if it can be made generic enough to cover the parameters needed at each "point of interest". >Having one method per "event" or doing it this way is a no-win choice. The > callout > table will grow larger with more entries if we go to one function per > event. More > methods will be in the capture engine. But if we stick this way, there is a > smaller > callout table, fewer capture methods, but the capture methods likely will > have some > decode logic. > Perhaps, or you just need to be fastidious in how many events you include in an event set. You could also have a catch-all function, e.g. _Capture_Event_handler default; that can be invoked for when there isn't a specific handler for an event at some point of interest. > 2) This design focuses on pass paths for points of interest and ignores > failure paths, > is this the direction we wish to follow? > > There are potentially LOTS of failure points and the errors will be in the > supercore level. > Worse, a "try lock" which doesn't get the mutex could end up in a busy loop > swamping >the capture engine. > You may consider reporting only one failure at each point of interest. For debugging purposes, it probably helps to see failures? > 3) We think the callout table should be completely populated. The table is > either > full of methods or not. It is an error not to install a handler for every > event. This > simplified checks at run-time. > And if I'm only interested in tracking Object allocate/free? I still need to provide stubs (and suffer overhead) for locks or whatever other events there are? > 4) We discussed having the Score own a table with stubs so the calls are > always > present but that could negatively impact minimum footprint. This only puts a > single pointer and code to call. 
This point is arguable if we stick to the > small > number of handlers approach. Eliminating the "if table installed" check and > always > making the subroutine call to a stub MIGHT be better. But we thought > avoiding > a subroutine call in critical paths was best. > If you can figure out how to make gcc inline the function pointer for this case, it can be practicable since there should be zero-overhead after optimizations. That's all I have for now. -Gedare > So this is basically some code using one approach for discussion purposes. It > Is only known to compile and this may not be enough for you to compile it. :) > > We want to discuss the design approach and see where to take this. > > Jennifer Averett > On-Line Applications Research > > ___ > devel mailing list > devel@rtems.org > http://lists.rtems.org/mailman/listinfo/devel ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
Re: Misc on RBTree Thread Queue Priority Discipline Changes
On Tue, Jul 8, 2014 at 5:37 PM, Joel Sherrill wrote: > Hi > > If you take the patches in their entirety, most of the tests > appear to be about 500 bytes smaller on the erc32. > What is the change in wkspace size? Basically you add 3 pointers + enum to each TCB / thd proxy, but remove some space for the control node I guess. I would guess the code savings is worth it. > None of the tmtests do priority based blocking so I can't > report any changes there. > > There was historically a subroutine in the threadq calls for a discipline > specific routine. Using the RBTree, this resulted in only 3-5 lines > of code unique to the discipline in those discipline files. The other > 20+ lines was duplicated. The last patch folds those subroutines into > the main methods. > > There is still some room for clean up since the code that is in > threadblockingoperationcancel.c is open coded there and > three other places. So there are a total of four copies of this > code in the tree. > > + rtems/src/eventsurrender.c > + score/src/threadqdequeue.c > + score/src/threadqextract.c > + score/src/threadblockingoperationcancel.c > I do not see discipline used in threadblockingoperationcancel.c. What do you mean? > Two of those need the debug check code and two do not. > Also the method name isn't right for all four cases. It indicates > its use on the "resource released in ISR" usage not the > normal "clean up from blocking" case. > > Suggestions on a new name and whether this should be > a real subroutine or a static inline is appreciated. Then I > can rework and reduce the code duplication. > Propose the code; I'm not sure what you mean to refactor into the new > function. -Gedare > -- > Joel Sherrill, Ph.D. 
Director of Research & Development > joel.sherr...@oarcorp.comOn-Line Applications Research > Ask me about RTEMS: a free RTOS Huntsville AL 35805 > Support Available(256) 722-9985 > > ___ > devel mailing list > devel@rtems.org > http://lists.rtems.org/mailman/listinfo/devel ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
Re: Misc on RBTree Thread Queue Priority Discipline Changes
On 7/9/2014 10:34 AM, Gedare Bloom wrote: > On Tue, Jul 8, 2014 at 5:37 PM, Joel Sherrill > wrote: >> Hi >> >> If you take the patches in their entirety, most of the tests >> appear to be about 500 bytes smaller on the erc32. >> > What is the change in wkspace size? Basically you add 3 pointers + > enum to each TCB / thd proxy, but remove some space for the control > node I guess. I would guess the code savings is worth it. Good question. I was watching code space. :) Before: (gdb) p sizeof(Thread_queue_Control) $1 = 64 (gdb) p sizeof(Thread_Control) $2 = 360 After: (gdb) p sizeof(Thread_queue_Control) $1 = 40 (gdb) p sizeof(Thread_Control) $2 = 376 Summary: -24 Thread_queue_Control +16 Thread_Control Since thread queues are used in blocking objects, I assume there will be more of them in a system and this is a net win. Technically, they could be used in the Scheduler implementations. When a thread is not blocked, the RBTree_Node is unused. >> None of the tmtests do priority based blocking so I can't >> report any changes there. >> >> There was historically a subroutine in the threadq calls for a discipline >> specific routine. Using the RBTree, this resulted in only 3-5 lines >> of code unique to the discipline in those discipline files. The other >> 20+ lines was duplicated. The last patch folds those subroutines into >> the main methods. >> >> There is still some room for clean up since the code that is in >> threadblockingoperationcancel.c is open coded there and >> three other places. So there are a total of four copies of this >> code in the tree. >> >> + rtems/src/eventsurrender.c >> + score/src/threadqdequeue.c >> + score/src/threadqextract.c >> + score/src/threadblockingoperationcancel.c >> > I do not see discipline used in threadblockingoperationcancel.c what > do you mean? All four of those methods do exactly the same thing AFTER they deal with the event or threadq specific discipline code. I have a patch pending with this change. 
>> Two of those need the debug check code and two do not. >> Also the method name isn't right for all four cases. It indicates >> its use on the "resource released in ISR" usage not the >> normal "clean up from blocking" case. >> >> Suggestions on a new name and whether this should be >> a real subroutine or a static inline is appreciated. Then I >> can rework and reduce the code duplication. >> > Propose the code; I'm not sure what you mean to refactor into the new > function. I found a solution and will post a patch. Two methods. The one used for undoing on a sync state trigger will be a wrapper for the one used for normal unblocking. > -Gedare > >> -- >> Joel Sherrill, Ph.D. Director of Research & Development >> joel.sherr...@oarcorp.comOn-Line Applications Research >> Ask me about RTEMS: a free RTOS Huntsville AL 35805 >> Support Available(256) 722-9985 >> >> ___ >> devel mailing list >> devel@rtems.org >> http://lists.rtems.org/mailman/listinfo/devel -- Joel Sherrill, Ph.D. Director of Research & Development joel.sherr...@oarcorp.comOn-Line Applications Research Ask me about RTEMS: a free RTOS Huntsville AL 35805 Support Available(256) 722-9985 ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel
[PATCH 6/6] Use Shared Method for Thread Unblock Cleanup
When a thread is removed from a thread queue or is unblocked by receiving an event, the same actions are required. + timeout watchdog canceled, + thread must be unblocked, and + (MP only) proxy cleaned up This patch makes sure there is only one copy of this code. --- cpukit/score/include/rtems/score/threadimpl.h| 18 ++ cpukit/score/src/threadblockingoperationcancel.c | 65 -- cpukit/score/src/threadqdequeue.c| 18 +- cpukit/score/src/threadqextract.c| 22 ++- 4 files changed, 63 insertions(+), 60 deletions(-) diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h index 4971e9d..5327e29 100644 --- a/cpukit/score/include/rtems/score/threadimpl.h +++ b/cpukit/score/include/rtems/score/threadimpl.h @@ -403,6 +403,24 @@ void _Thread_blocking_operation_Cancel( ISR_Level level ); +/** + * @brief Finalize a blocking operation. + * + * This method is used to finalize a blocking operation that was + * satisfied. It may be used with thread queues or any other synchronization + * object that uses the blocking states and watchdog times for timeout. + * + * This method will restore the previous ISR disable level during the cancel + * operation. Thus it is an implicit _ISR_Enable(). 
+ * + * @param[in] the_thread is the thread whose blocking is canceled + * @param[in] level is the previous ISR disable level + */ +void _Thread_blocking_operation_Finalize( + Thread_Control *the_thread, + ISR_Level level +); + RTEMS_INLINE_ROUTINE Per_CPU_Control *_Thread_Get_CPU( const Thread_Control *thread ) diff --git a/cpukit/score/src/threadblockingoperationcancel.c b/cpukit/score/src/threadblockingoperationcancel.c index 127d852..b496796 100644 --- a/cpukit/score/src/threadblockingoperationcancel.c +++ b/cpukit/score/src/threadblockingoperationcancel.c @@ -24,6 +24,41 @@ #endif #include +void _Thread_blocking_operation_Finalize( + Thread_Control *the_thread, + ISR_Level level +) +{ + /* + * The thread is not waiting on anything after this completes. + */ + the_thread->Wait.queue = NULL; + + /* + * If the sync state is timed out, this is very likely not needed. + * But better safe than sorry when it comes to critical sections. + */ + if ( _Watchdog_Is_active( &the_thread->Timer ) ) { +_Watchdog_Deactivate( &the_thread->Timer ); +_ISR_Enable( level ); +(void) _Watchdog_Remove( &the_thread->Timer ); + } else +_ISR_Enable( level ); + + /* + * Global objects with thread queue's should not be operated on from an + * ISR. But the sync code still must allow short timeouts to be processed + * correctly. + */ + + _Thread_Unblock( the_thread ); + +#if defined(RTEMS_MULTIPROCESSING) + if ( !_Objects_Is_local_id( the_thread->Object.id ) ) +_Thread_MP_Free_proxy( the_thread ); +#endif +} + void _Thread_blocking_operation_Cancel( #if defined(RTEMS_DEBUG) Thread_blocking_operation_States sync_state, @@ -59,33 +94,5 @@ void _Thread_blocking_operation_Cancel( } #endif - /* - * The thread is not waiting on anything after this completes. - */ - the_thread->Wait.queue = NULL; - - /* - * If the sync state is timed out, this is very likely not needed. - * But better safe than sorry when it comes to critical sections. 
- */ - if ( _Watchdog_Is_active( &the_thread->Timer ) ) { -_Watchdog_Deactivate( &the_thread->Timer ); -_ISR_Enable( level ); -(void) _Watchdog_Remove( &the_thread->Timer ); - } else -_ISR_Enable( level ); - - /* - * Global objects with thread queue's should not be operated on from an - * ISR. But the sync code still must allow short timeouts to be processed - * correctly. - */ - - _Thread_Unblock( the_thread ); - -#if defined(RTEMS_MULTIPROCESSING) - if ( !_Objects_Is_local_id( the_thread->Object.id ) ) -_Thread_MP_Free_proxy( the_thread ); -#endif - + _Thread_blocking_operation_Finalize( the_thread, level ); } diff --git a/cpukit/score/src/threadqdequeue.c b/cpukit/score/src/threadqdequeue.c index 3b55e52..d745ef2 100644 --- a/cpukit/score/src/threadqdequeue.c +++ b/cpukit/score/src/threadqdequeue.c @@ -70,22 +70,10 @@ Thread_Control *_Thread_queue_Dequeue( /* * We found a thread to unblock. + * + * NOTE: This is invoked with interrupts still disabled. */ - the_thread->Wait.queue = NULL; - if ( !_Watchdog_Is_active( &the_thread->Timer ) ) { -_ISR_Enable( level ); - } else { -_Watchdog_Deactivate( &the_thread->Timer ); -_ISR_Enable( level ); -(void) _Watchdog_Remove( &the_thread->Timer ); - } - - _Thread_Unblock( the_thread ); - -#if defined(RTEMS_MULTIPROCESSING) - if ( !_Objects_Is_local_id( the_thread->Object.id ) ) -_Thread_MP_Free_proxy( the_thread ); -#endif + _Thread_blocking_operation_Finalize( the_thr
Re: [rtems commit] bsps: Basic console driver for Termios devices
This is duplicative of the following files in libbsp/shared: console_control.c console_read.c console_write.c Please just add individual files for the other methods and list them. And yes, there are BSPs that only use some of these and provide their own versions of others. On 7/9/2014 5:52 AM, Sebastian Huber wrote: > Module:rtems > Branch:master > Commit:a0eb21ebabcc089e2ab4a5850b5c67875144cb0a > Changeset: > http://git.rtems.org/rtems/commit/?id=a0eb21ebabcc089e2ab4a5850b5c67875144cb0a > > Author:Sebastian Huber > Date: Fri Jun 27 16:20:44 2014 +0200 > > bsps: Basic console driver for Termios devices > > --- > > c/src/lib/libbsp/shared/console-termios.c | 73 > + > 1 files changed, 73 insertions(+), 0 deletions(-) > > diff --git a/c/src/lib/libbsp/shared/console-termios.c > b/c/src/lib/libbsp/shared/console-termios.c > new file mode 100644 > index 000..f57b06c > --- /dev/null > +++ b/c/src/lib/libbsp/shared/console-termios.c > @@ -0,0 +1,73 @@ > +/* > + * Copyright (c) 2014 embedded brains GmbH. All rights reserved. > + * > + * embedded brains GmbH > + * Dornierstr. 4 > + * 82178 Puchheim > + * Germany > + * > + * > + * The license and distribution terms for this file may be > + * found in the file LICENSE in this distribution or at > + * http://www.rtems.org/license/LICENSE. 
> + */ > + > +#include > +#include > + > +rtems_device_driver console_open( > + rtems_device_major_number major, > + rtems_device_minor_number minor, > + void *arg > +) > +{ > + return rtems_termios_device_open( major, minor, arg ); > +} > + > +rtems_device_driver console_close( > + rtems_device_major_number major, > + rtems_device_minor_number minor, > + void *arg > +) > +{ > + (void) major; > + (void) minor; > + > + return rtems_termios_device_close( arg ); > +} > + > +rtems_device_driver console_read( > + rtems_device_major_number major, > + rtems_device_minor_number minor, > + void *arg > +) > +{ > + (void) major; > + (void) minor; > + > + return rtems_termios_read( arg ); > +} > + > +rtems_device_driver console_write( > + rtems_device_major_number major, > + rtems_device_minor_number minor, > + void *arg > +) > +{ > + (void) major; > + (void) minor; > + > + return rtems_termios_write( arg ); > +} > + > +rtems_device_driver console_control( > + rtems_device_major_number major, > + rtems_device_minor_number minor, > + void *arg > +) > +{ > + (void) major; > + (void) minor; > + > + return rtems_termios_ioctl( arg ); > +} > > ___ > vc mailing list > v...@rtems.org > http://lists.rtems.org/mailman/listinfo/vc -- Joel Sherrill, Ph.D. Director of Research & Development joel.sherr...@oarcorp.comOn-Line Applications Research Ask me about RTEMS: a free RTOS Huntsville AL 35805 Support Available(256) 722-9985 ___ devel mailing list devel@rtems.org http://lists.rtems.org/mailman/listinfo/devel