On Jul 4, 2014 10:09 AM, Daniel Cederman <ceder...@gaisler.com> wrote:
>
> > This limits the API to the default cpu_set_t. Other routines like
> > pthread_setaffinity_np() don't have this limitation.
>
> I looked at pthread_setaffinity_np() and got a bit confused. I see that
> it takes both a pointer to a cpu_set_t and the size of the cpu set. It
> forwards it to _Scheduler_Set_affinity(), which requires
> __RTEMS_HAVE_SYS_CPUSET_H__ to be defined. If this is defined, then
> _CPU_set_Handler_initialization() checks that the number of CPUs is
> less than CPU_SETSIZE. In newlib this is defined to 32 and is used to
> size the cpu_set_t struct. So why is the size needed for
> pthread_setaffinity_np() if the number of CPUs cannot exceed a
> hardcoded constant? I recall that there was a discussion on the list
> about CPU sets, but I'm not finding anything when searching.

This keeps the API compatible with Linux and *BSD even though we don't
support more than 32 CPUs yet. So we are planning ahead.
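To illustrate (this is a sketch of Linux/glibc-style usage, not
something newlib supports today since it pins CPU_SETSIZE at 32): the
size parameter is what lets the very same call work with dynamically
allocated sets that are larger than the default cpu_set_t:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/*
 * Sketch only: pin the calling thread to processor 1 using a
 * dynamically sized CPU set.  CPU_ALLOC() creates a set with room for
 * more processors than the fixed-size cpu_set_t, and the size argument
 * of pthread_setaffinity_np() says how large the set actually is.
 * That is why the size travels along with the pointer.
 */
static int pin_to_processor_one( void )
{
  cpu_set_t *set = CPU_ALLOC( 128 );      /* room for 128 processors */
  size_t size = CPU_ALLOC_SIZE( 128 );
  int rv;

  if ( set == NULL )
    return -1;

  CPU_ZERO_S( size, set );
  CPU_SET_S( 1, size, set );

  rv = pthread_setaffinity_np( pthread_self(), size, set );

  CPU_FREE( set );
  return rv;
}

A caller with a plain cpu_set_t just passes sizeof(cpu_set_t), so one
entry point serves both cases.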
> Daniel Cederman
> Software Engineer
> Aeroflex Gaisler AB
> Aeroflex Microelectronic Solutions – HiRel
> Kungsgatan 12
> SE-411 19 Gothenburg, Sweden
> Phone: +46 31 7758665
> ceder...@gaisler.com
> www.Aeroflex.com/Gaisler
>
> On 2014-07-04 08:38, Sebastian Huber wrote:
> > On 2014-07-03 11:37, Daniel Cederman wrote:
> >> Adds functions that allow the user to specify which cores should
> >> perform the cache operation. SMP messages are sent to all the
> >> specified cores and the caller waits until all cores have
> >> acknowledged that they have flushed their cache. Implementation is
> >> shown using both function pointers and function ids together with a
> >> switch statement. One needs to be dropped before committing. Any
> >> preference?
> >> ---
> >>  c/src/lib/libcpu/shared/src/cache_manager.c | 166 +++++++++++++++++++++++++++
> >>  cpukit/rtems/include/rtems/rtems/cache.h    |  84 ++++++++++++++
> >>  cpukit/score/include/rtems/score/smpimpl.h  |  13 +++
> >>  3 files changed, 263 insertions(+)
> >>
> >> diff --git a/c/src/lib/libcpu/shared/src/cache_manager.c b/c/src/lib/libcpu/shared/src/cache_manager.c
> >> index 420a013..91e9f72 100644
> >> --- a/c/src/lib/libcpu/shared/src/cache_manager.c
> >> +++ b/c/src/lib/libcpu/shared/src/cache_manager.c
> >> @@ -37,6 +37,172 @@
> >>
> >>  #include <rtems.h>
> >>  #include "cache_.h"
> >> +#include <rtems/score/smplock.h>
> >> +#include <rtems/score/smpimpl.h>
> >> +
> >> +#if defined( RTEMS_SMP )
> >> +
> >> +typedef void (*cache_manager_func_t)(const void * d_addr, size_t n_bytes);
>
> > The *_t namespace is reserved by POSIX. The names don't follow the
> > score naming conventions.
> >
> > http://www.rtems.org/wiki/index.php/Naming_rules
>
> >> +
> >> +typedef enum {
> >> +  FLUSH_MULTIPLE_DATA_LINES,
> >> +  INVALIDATE_MULTIPLE_DATA_LINES,
> >> +  INVALIDATE_MULTIPLE_INSTRUCTION_LINES,
> >> +  FLUSH_ENTIRE_DATA,
> >> +  INVALIDATE_ENTIRE_INSTRUCTION,
> >> +  INVALIDATE_ENTIRE_DATA
> >> +} cache_manager_func_id_t;
> >> +
> >> +typedef struct {
> >> +  SMP_lock_Control lock;
> >> +  cache_manager_func_t func;
> >> +  cache_manager_func_id_t func_id;
> >> +  Atomic_Uint count;
> >> +  const void *addr;
> >> +  size_t size;
> >> +} Cache_Manager_SMP;
> >> +
> >> +static Cache_Manager_SMP _CM_SMP = {
> >> +  .lock = SMP_LOCK_INITIALIZER("CacheMgr"),
> >> +  .count = CPU_ATOMIC_INITIALIZER_UINT(0)
> >> +};
> >> +
> >> +void
> >> +_SMP_Cache_manager_ipi_handler(void)
>
> > I would rather name this _SMP_Cache_manager_message_handler().
> >
> >> +{
> >> +#ifdef USE_FUNCPTR
> >> +  _CM_SMP.func( _CM_SMP.addr, _CM_SMP.size );
> >> +#else
> >> +  switch( _CM_SMP.func_id ) {
> >> +    case FLUSH_MULTIPLE_DATA_LINES:
> >> +      rtems_cache_flush_multiple_data_lines(
> >> +        _CM_SMP.addr, _CM_SMP.size );
> >> +      break;
> >> +    case INVALIDATE_MULTIPLE_DATA_LINES:
> >> +      rtems_cache_invalidate_multiple_data_lines(
> >> +        _CM_SMP.addr, _CM_SMP.size );
> >> +      break;
> >> +    case INVALIDATE_MULTIPLE_INSTRUCTION_LINES:
> >> +      rtems_cache_invalidate_multiple_instruction_lines(
> >> +        _CM_SMP.addr, _CM_SMP.size );
> >> +      break;
> >> +    case FLUSH_ENTIRE_DATA:
> >> +      rtems_cache_flush_entire_data();
> >> +      break;
> >> +    case INVALIDATE_ENTIRE_INSTRUCTION:
> >> +      rtems_cache_invalidate_entire_instruction();
> >> +      break;
> >> +    case INVALIDATE_ENTIRE_DATA:
> >> +      rtems_cache_invalidate_entire_data();
> >> +      break;
> >> +    default:
> >> +      _Assert( 0 );
> >> +      break;
> >> +  }
> >> +#endif
> >> +
> >> +  _Atomic_Fetch_sub_uint( &_CM_SMP.count, 1, ATOMIC_ORDER_RELEASE );
> >> +}
> >> +
> >> +static void
> >> +_cache_manager_send_ipi_msg( const cpu_set_t *set, cache_manager_func_t func,
> >> +  cache_manager_func_id_t func_id, const void * addr, size_t size )
> >> +{
> >> +  uint32_t i;
> >> +  uint32_t set_size = 0;
> >> +  SMP_lock_Context lock_context;
> >> +
> >> +  _Assert( _System_state_Is_up( _System_state_Get() ) );
> >> +
> >> +  for( i=0; i < _SMP_Get_processor_count(); ++i ) {
> >> +    set_size += CPU_ISSET(i, set);
> >> +  }
> >> +
> >> +  _SMP_lock_Acquire( &_CM_SMP.lock, &lock_context );
>
> > With this implementation cache routines must not be called from
> > interrupt context. This should be mentioned in the documentation.
> >
> > It is extremely difficult to implement it in a way that allows it to
> > be used from interrupt context.
>
> >> +
> >> +  _CM_SMP.func = func;
> >> +  _CM_SMP.func_id = func_id;
> >> +  _CM_SMP.addr = addr;
> >> +  _CM_SMP.size = size;
> >> +  _Atomic_Store_uint( &_CM_SMP.count, set_size, ATOMIC_ORDER_RELEASE );
> >> +  _Atomic_Fence( ATOMIC_ORDER_RELEASE );
> >> +
> >> +  _SMP_Send_message_cpu_set( set, SMP_MESSAGE_CACHE_MANAGER );
> >> +
> >> +  while(_Atomic_Load_uint( &_CM_SMP.count, ATOMIC_ORDER_ACQUIRE ) != 0 );
> >> +
> >> +  _SMP_lock_Release( &_CM_SMP.lock, &lock_context );
> >> +}
> >> +
> >> +void
> >> +rtems_cache_invalidate_entire_instruction_cpu_set ( const cpu_set_t *set )
>
> > This limits the API to the default cpu_set_t. Other routines like
> > pthread_setaffinity_np() don't have this limitation.
> >
> > In other places we don't use "cpu", we use "processor"; it should be
> > consistent in the high-level RTEMS APIs.
> >
> > [...]
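For reference, here is a minimal usage sketch of the API as proposed in
this patch. The function name is taken from the diff above and may well
change given the comments on cpu_set_t and on "cpu" vs. "processor";
it also assumes newlib's <sys/cpuset.h> is available, i.e.
__RTEMS_HAVE_SYS_CPUSET_H__ is defined:

#include <rtems.h>
#include <sys/cpuset.h>

/*
 * Sketch against the proposed patch: invalidate the entire instruction
 * cache on processors 0 and 1.  The calling processor spins until
 * every processor in the set has acknowledged the SMP message, so per
 * the review comments above this must not be called from interrupt
 * context.
 */
static void invalidate_icache_on_first_two_processors( void )
{
  cpu_set_t set;

  CPU_ZERO( &set );
  CPU_SET( 0, &set );
  CPU_SET( 1, &set );

  rtems_cache_invalidate_entire_instruction_cpu_set( &set );
}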