path: root/cpukit/score/src/corespinlockwait.c
author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2016-03-18 14:03:01 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2016-03-18 15:36:58 +0100
commit     5a5fb3b9d6d99d6751d129458217f1a3b5b85ff8 (patch)
tree       9f2296b7e4abaa0da454caea84e2fcbc0eb49fd2 /cpukit/score/src/corespinlockwait.c
parent     score: Add _Objects_Get_by_name() (diff)
download   rtems-5a5fb3b9d6d99d6751d129458217f1a3b5b85ff8.tar.bz2
score: Avoid Giant lock for CORE spinlock
Use an ISR lock to protect the spinlock state. Remove empty attributes. Update #2555.
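For context, a minimal sketch of the locking idiom this commit adopts (illustrative only, not code from the commit): per-object state is protected by its own ISR lock from <rtems/score/isrlock.h> instead of disabling interrupts globally and relying on the Giant lock. The Example_Control type and _Example_Update() function below are hypothetical.

#include <rtems/score/isrlock.h>

typedef struct {
  ISR_LOCK_MEMBER( Lock )  /* per-object ISR lock; initialize with
                              _ISR_lock_Initialize() before first use */
  int state;               /* hypothetical state protected by the lock */
} Example_Control;

static void _Example_Update( Example_Control *example, int new_state )
{
  ISR_lock_Context lock_context;

  /* Disable interrupts and acquire the object's lock (SMP-safe) */
  _ISR_lock_ISR_disable_and_acquire( &example->Lock, &lock_context );
  example->state = new_state;
  /* Release the lock and restore the previous interrupt state */
  _ISR_lock_Release_and_ISR_enable( &example->Lock, &lock_context );
}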
Diffstat (limited to 'cpukit/score/src/corespinlockwait.c')
-rw-r--r--  cpukit/score/src/corespinlockwait.c  63
1 file changed, 25 insertions, 38 deletions
diff --git a/cpukit/score/src/corespinlockwait.c b/cpukit/score/src/corespinlockwait.c
index 1f102962ac..cc939c2344 100644
--- a/cpukit/score/src/corespinlockwait.c
+++ b/cpukit/score/src/corespinlockwait.c
@@ -18,48 +18,36 @@
#include "config.h"
#endif
-#include <rtems/system.h>
#include <rtems/score/corespinlockimpl.h>
-#include <rtems/score/thread.h>
-#include <rtems/score/threaddispatch.h>
+#include <rtems/score/percpu.h>
-/*
- * _CORE_spinlock_Wait
- *
- * This function waits for the spinlock to become available. Optionally,
- * a limit may be placed on the duration of the spin.
- *
- * Input parameters:
- * the_spinlock - the spinlock control block to initialize
- * wait - true if willing to wait
- * timeout - the maximum number of ticks to spin (0 is forever)
- *
- * Output parameters: NONE
- */
-
-CORE_spinlock_Status _CORE_spinlock_Wait(
- CORE_spinlock_Control *the_spinlock,
- bool wait,
- Watchdog_Interval timeout
+CORE_spinlock_Status _CORE_spinlock_Seize(
+ CORE_spinlock_Control *the_spinlock,
+ bool wait,
+ Watchdog_Interval timeout,
+ ISR_lock_Context *lock_context
)
{
- ISR_Level level;
+ Thread_Control *executing;
+
#if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
Watchdog_Interval limit = _Watchdog_Ticks_since_boot + timeout;
#endif
- _ISR_Disable( level );
- if ( (the_spinlock->lock == CORE_SPINLOCK_LOCKED) &&
- (the_spinlock->holder == _Thread_Executing->Object.id) ) {
- _ISR_Enable( level );
+ executing = _Thread_Executing;
+
+ _CORE_spinlock_Acquire_critical( the_spinlock, lock_context );
+ if ( the_spinlock->lock == CORE_SPINLOCK_LOCKED &&
+ the_spinlock->holder == executing ) {
+ _CORE_spinlock_Release( the_spinlock, lock_context );
return CORE_SPINLOCK_HOLDER_RELOCKING;
}
the_spinlock->users += 1;
for ( ;; ) {
if ( the_spinlock->lock == CORE_SPINLOCK_UNLOCKED ) {
the_spinlock->lock = CORE_SPINLOCK_LOCKED;
- the_spinlock->holder = _Thread_Executing->Object.id;
- _ISR_Enable( level );
+ the_spinlock->holder = executing;
+ _CORE_spinlock_Release( the_spinlock, lock_context );
return CORE_SPINLOCK_SUCCESSFUL;
}
@@ -68,7 +56,7 @@ CORE_spinlock_Status _CORE_spinlock_Wait(
*/
if ( !wait ) {
the_spinlock->users -= 1;
- _ISR_Enable( level );
+ _CORE_spinlock_Release( the_spinlock, lock_context );
return CORE_SPINLOCK_UNAVAILABLE;
}
@@ -78,7 +66,7 @@ CORE_spinlock_Status _CORE_spinlock_Wait(
*/
if ( timeout && (limit <= _Watchdog_Ticks_since_boot) ) {
the_spinlock->users -= 1;
- _ISR_Enable( level );
+ _CORE_spinlock_Release( the_spinlock, lock_context );
return CORE_SPINLOCK_TIMEOUT;
}
#endif
@@ -100,16 +88,15 @@ CORE_spinlock_Status _CORE_spinlock_Wait(
* safe from deletion.
*/
- _ISR_Enable( level );
- /* An ISR could occur here */
-
- _Thread_Enable_dispatch();
- /* Another thread could get dispatched here */
+ _CORE_spinlock_Release( the_spinlock, lock_context );
- /* Reenter the critical sections so we can attempt the lock again. */
- _Thread_Disable_dispatch();
+ /*
+ * An ISR could occur here. Another thread could get dispatched here.
+ * Reenter the critical sections so we can attempt the lock again.
+ */
- _ISR_Disable( level );
+ _ISR_lock_ISR_disable( lock_context );
+ _CORE_spinlock_Acquire_critical( the_spinlock, lock_context );
}
}
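With the Giant lock gone, a caller of the renamed _CORE_spinlock_Seize() is expected to disable interrupts through the ISR lock context first; the seize routine then takes the object's lock via _CORE_spinlock_Acquire_critical() and re-enables interrupts on every return path via _CORE_spinlock_Release(), as the retry loop above shows. A hypothetical caller sketch follows (the wrapper name is illustrative; per the removed comment, a timeout of 0 means spin forever):

#include <rtems/score/corespinlockimpl.h>

static CORE_spinlock_Status example_spinlock_obtain(
  CORE_spinlock_Control *the_spinlock
)
{
  ISR_lock_Context lock_context;

  /* Disable interrupts; _CORE_spinlock_Seize() acquires the object's
     ISR lock and releases it (re-enabling interrupts) before returning. */
  _ISR_lock_ISR_disable( &lock_context );

  /* Willing to wait, no tick limit (0 == spin forever). */
  return _CORE_spinlock_Seize( the_spinlock, true, 0, &lock_context );
}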