blob: b8cb58fa1ca2e38cdeebfc553c2d77a65b1b69c7 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
|
/*
* @file
*
* @brief Wait for Spinlock
* @ingroup ScoreSpinlock
*/
/*
* COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems/system.h>
#include <rtems/score/corespinlockimpl.h>
#include <rtems/score/thread.h>
#include <rtems/score/threaddispatch.h>
/*
* _CORE_spinlock_Wait
*
* This function waits for the spinlock to become available. Optionally,
* a limit may be placed on the duration of the spin.
*
* Input parameters:
 *    the_spinlock - the spinlock control block to wait upon
* wait - true if willing to wait
* timeout - the maximum number of ticks to spin (0 is forever)
*
* Output parameters: NONE
*/
/**
 * @brief Attempt to acquire @a the_spinlock, optionally spinning until it
 *        becomes available.
 *
 * @param[in] the_spinlock  spinlock control block to acquire
 * @param[in] wait          true to spin until the lock is available,
 *                          false to return immediately if it is held
 * @param[in] timeout       maximum number of clock ticks to spin
 *                          (0 means spin forever); only honored when the
 *                          FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API
 *                          timeout support is compiled in
 *
 * @retval CORE_SPINLOCK_SUCCESSFUL        lock acquired
 * @retval CORE_SPINLOCK_HOLDER_RELOCKING  caller already holds the lock
 * @retval CORE_SPINLOCK_UNAVAILABLE       lock held and @a wait was false
 * @retval CORE_SPINLOCK_TIMEOUT           spin exceeded @a timeout ticks
 *                                         (only when timeout support is
 *                                         compiled in)
 */
CORE_spinlock_Status _CORE_spinlock_Wait(
  CORE_spinlock_Control *the_spinlock,
  bool                   wait,
  Watchdog_Interval      timeout
)
{
  ISR_Level level;
  #if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
    /* Absolute tick count at which the spin gives up (if timeout != 0). */
    Watchdog_Interval limit = _Watchdog_Ticks_since_boot + timeout;
  #endif

  _ISR_Disable( level );

  /*
   *  Detect a recursive acquire attempt: the lock is held and the holder
   *  is the executing thread.  Spinning here would deadlock, so report it.
   */
  if ( (the_spinlock->lock == CORE_SPINLOCK_LOCKED) &&
       (the_spinlock->holder == _Thread_Executing->Object.id) ) {
    _ISR_Enable( level );
    return CORE_SPINLOCK_HOLDER_RELOCKING;
  }

  /*
   *  Count this thread as a user of the spinlock.  The count is rolled
   *  back on the unavailable/timeout exits below; on the successful exit
   *  it remains incremented until the corresponding release.
   */
  the_spinlock->users += 1;
  for ( ;; ) {
    /* Lock is free: claim it, record ourselves as holder, and return. */
    if ( the_spinlock->lock == CORE_SPINLOCK_UNLOCKED ) {
      the_spinlock->lock = CORE_SPINLOCK_LOCKED;
      the_spinlock->holder = _Thread_Executing->Object.id;
      _ISR_Enable( level );
      return CORE_SPINLOCK_SUCCESSFUL;
    }

    /*
     *  Spinlock is unavailable.  If not willing to wait, return.
     */
    if ( !wait ) {
      the_spinlock->users -= 1;
      _ISR_Enable( level );
      return CORE_SPINLOCK_UNAVAILABLE;
    }

    #if defined(FUNCTIONALITY_NOT_CURRENTLY_USED_BY_ANY_API)
      /*
       *  They are willing to wait but there could be a timeout.
       */
      if ( timeout && (limit <= _Watchdog_Ticks_since_boot) ) {
        the_spinlock->users -= 1;
        _ISR_Enable( level );
        return CORE_SPINLOCK_TIMEOUT;
      }
    #endif

    /*
     *  The thread is willing to spin so let's set things up so
     *  another thread has a chance of running.  This spinlock has
     *  to be released by either another thread or an ISR.  Since
     *  POSIX does not say anything about ISRs, that implies that
     *  another thread must be able to run while spinning.  We are
     *  not blocking so that implies we are at least preemptible
     *  and possibly time-sliced.
     *
     *  So first, we will enable interrupts to allow for them to happen.
     *  Then we will "flash" the thread dispatching critical section
     *  so other threads have a chance to run.
     *
     *  A spinlock cannot be deleted while it is being used so we are
     *  safe from deletion.
     */
    _ISR_Enable( level );
    /* An ISR could occur here */

    _Thread_Enable_dispatch();
    /* Another thread could get dispatched here */

    /* Reenter the critical sections so we can attempt the lock again. */
    _Thread_Disable_dispatch();
    _ISR_Disable( level );
  }
}
|