author	Sebastian Huber <sebastian.huber@embedded-brains.de>	2016-03-03 13:37:14 +0100
committer	Sebastian Huber <sebastian.huber@embedded-brains.de>	2016-03-04 13:36:09 +0100
commit	5b0d2c1965e1dab2915231866aaa5623ced66330 (patch)
tree	e425e0eb73815b874efbd946c80047341f304793 /cpukit/score/src/percpu.c
parent	score: Add _Per_CPU_Is_boot_processor() (diff)
download	rtems-5b0d2c1965e1dab2915231866aaa5623ced66330.tar.bz2
score: Add _SMP_Before_multitasking_action()
The use case for this is the Cortex-A9 MPCore, which has per-processor registers (accessible only by that particular processor) for the global timer used by the clock driver. This might be useful for other drivers as well.
Update #2554.
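
For context, a minimal sketch of how a clock driver could use the new _SMP_Before_multitasking_action() to program per-processor timer registers from the boot processor. The handler and function names, the loop over secondary processors, and the omitted register writes are hypothetical illustrations, not part of this commit:

```c
#include <rtems/score/smpimpl.h>
#include <rtems/score/percpu.h>

/* Hypothetical handler: runs on the target processor and programs its
 * per-processor (banked) global timer registers.  The actual register
 * writes are omitted; they are specific to the Cortex-A9 MPCore.
 */
static void a9mpcore_timer_set_up_secondary( void *arg )
{
  uint32_t interval = *(uint32_t *) arg;

  /* ... write the per-processor comparator/control registers here ... */
  (void) interval;
}

/* Hypothetical caller: executed on the boot processor before multitasking
 * starts.  The boot processor programs its own registers directly and uses
 * the new action mechanism for the other online processors.
 */
static void a9mpcore_timer_set_up_all( uint32_t interval )
{
  uint32_t cpu_index;
  uint32_t cpu_count = _SMP_Get_processor_count();

  for ( cpu_index = 1; cpu_index < cpu_count; ++cpu_index ) {
    Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );

    /* Busy waits until the target processor has executed the handler, so
     * passing the address of a local variable is safe here.  Returns false
     * if the processor is not online or cannot perform the action.
     */
    if ( !_SMP_Before_multitasking_action(
           cpu,
           a9mpcore_timer_set_up_secondary,
           &interval
         ) ) {
      /* Processor offline: nothing to set up on it. */
    }
  }
}
```
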
Diffstat (limited to 'cpukit/score/src/percpu.c')
-rw-r--r--	cpukit/score/src/percpu.c	92
1 file changed, 85 insertions(+), 7 deletions(-)
diff --git a/cpukit/score/src/percpu.c b/cpukit/score/src/percpu.c
index 730528ae4c..0e4c0678e7 100644
--- a/cpukit/score/src/percpu.c
+++ b/cpukit/score/src/percpu.c
@@ -20,6 +20,7 @@
#include <rtems/score/percpu.h>
#include <rtems/score/assert.h>
+#include <rtems/score/isrlock.h>
#include <rtems/score/smpimpl.h>
#include <rtems/config.h>
@@ -35,11 +36,48 @@ RTEMS_STATIC_ASSERT(
#if defined(RTEMS_SMP)
-static SMP_lock_Control _Per_CPU_State_lock =
- SMP_LOCK_INITIALIZER("per-CPU state");
+typedef struct {
+ SMP_Action_handler handler;
+ void *arg;
+} SMP_Before_multicast_action;
+
+ISR_LOCK_DEFINE( static, _Per_CPU_State_lock, "Per-CPU State" )
+
+static void _Per_CPU_State_acquire( ISR_lock_Context *lock_context )
+{
+ _ISR_lock_ISR_disable_and_acquire( &_Per_CPU_State_lock, lock_context );
+}
+
+static void _Per_CPU_State_release( ISR_lock_Context *lock_context )
+{
+ _ISR_lock_Release_and_ISR_enable( &_Per_CPU_State_lock, lock_context );
+}
+
+static void _Per_CPU_State_before_multitasking_action( Per_CPU_Control *cpu )
+{
+ uintptr_t action_value;
+
+ action_value = _Atomic_Load_uintptr(
+ &cpu->before_multitasking_action,
+ ATOMIC_ORDER_ACQUIRE
+ );
+
+ if ( action_value != 0 ) {
+ SMP_Before_multicast_action *action =
+ (SMP_Before_multicast_action *) action_value;
+
+ ( *action->handler )( action->arg );
+
+ _Atomic_Store_uintptr(
+ &cpu->before_multitasking_action,
+ 0,
+ ATOMIC_ORDER_RELEASE
+ );
+ }
+}
static void _Per_CPU_State_busy_wait(
- const Per_CPU_Control *cpu,
+ Per_CPU_Control *cpu,
Per_CPU_State new_state
)
{
@@ -60,6 +98,7 @@ static void _Per_CPU_State_busy_wait(
state != PER_CPU_STATE_REQUEST_START_MULTITASKING
&& state != PER_CPU_STATE_SHUTDOWN
) {
+ _Per_CPU_State_before_multitasking_action( cpu );
_CPU_SMP_Processor_event_receive();
state = cpu->state;
}
@@ -122,13 +161,12 @@ void _Per_CPU_State_change(
Per_CPU_State new_state
)
{
- SMP_lock_Control *lock = &_Per_CPU_State_lock;
- SMP_lock_Context lock_context;
+ ISR_lock_Context lock_context;
Per_CPU_State next_state;
_Per_CPU_State_busy_wait( cpu, new_state );
- _SMP_lock_ISR_disable_and_acquire( lock, &lock_context );
+ _Per_CPU_State_acquire( &lock_context );
next_state = _Per_CPU_State_get_next( cpu->state, new_state );
cpu->state = next_state;
@@ -157,7 +195,7 @@ void _Per_CPU_State_change(
_CPU_SMP_Processor_event_broadcast();
- _SMP_lock_Release_and_ISR_enable( lock, &lock_context );
+ _Per_CPU_State_release( &lock_context );
if (
next_state == PER_CPU_STATE_SHUTDOWN
@@ -167,6 +205,46 @@ void _Per_CPU_State_change(
}
}
+bool _SMP_Before_multitasking_action(
+ Per_CPU_Control *cpu,
+ SMP_Action_handler handler,
+ void *arg
+)
+{
+ bool done;
+
+ _Assert( _Per_CPU_Is_boot_processor( _Per_CPU_Get() ) );
+
+ if ( _Per_CPU_Is_processor_online( cpu ) ) {
+ SMP_Before_multicast_action action = {
+ .handler = handler,
+ .arg = arg
+ };
+ Per_CPU_State expected_state = PER_CPU_STATE_READY_TO_START_MULTITASKING;
+
+ _Atomic_Store_uintptr(
+ &cpu->before_multitasking_action,
+ (uintptr_t) &action,
+ ATOMIC_ORDER_RELEASE
+ );
+
+ _CPU_SMP_Processor_event_broadcast();
+
+ _Per_CPU_State_busy_wait( cpu, expected_state );
+
+ do {
+ done = _Atomic_Load_uintptr(
+ &cpu->before_multitasking_action,
+ ATOMIC_ORDER_ACQUIRE
+ ) == 0;
+ } while ( !done && cpu->state == expected_state );
+ } else {
+ done = false;
+ }
+
+ return done;
+}
+
#else
/*
* On single core systems, we can efficiently directly access a single