path: root/cpukit/score/src/coremutexsurrender.c
/**
 * @file
 *
 * @brief Surrender the Mutex
 * @ingroup ScoreMutex
 */

/*
 *  COPYRIGHT (c) 1989-2006.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/system.h>
#include <rtems/score/isr.h>
#include <rtems/score/coremuteximpl.h>
#include <rtems/score/thread.h>

Status_Control _CORE_mutex_Surrender(
  CORE_mutex_Control   *the_mutex,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *the_thread;
  Thread_Control *holder;

  holder = the_mutex->holder;

  /*
   *  The following code allows a thread (or ISR) other than the thread
   *  which acquired the mutex to release that mutex.  This is only
 *  allowed when the mutex in question is FIFO or simple Priority
   *  discipline.  But Priority Ceiling or Priority Inheritance mutexes
   *  must be released by the thread which acquired them.
   */

  if ( the_mutex->Attributes.only_owner_release ) {
    if ( !_Thread_Is_executing( holder ) ) {
      _ISR_lock_ISR_enable( &queue_context->Lock_context );
      return STATUS_NOT_OWNER;
    }
  }

  _CORE_mutex_Acquire_critical( the_mutex, queue_context );

  /* XXX already unlocked -- not right status */

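  /*
   *  A nest count of zero means the mutex is not currently locked, so
   *  there is nothing to surrender beyond releasing the lock acquired
   *  above.
   */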
  if ( !the_mutex->nest_count ) {
    _CORE_mutex_Release( the_mutex, queue_context );
    return STATUS_SUCCESSFUL;
  }

  the_mutex->nest_count--;

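  /*
   *  A non-zero count after the decrement means this surrender undoes
   *  one level of a recursive acquire and the holder retains ownership.
   */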
  if ( the_mutex->nest_count != 0 ) {
    /*
     *  All error checking is on the locking side, so if the lock was
     *  allowed to be acquired multiple times, then we should just deal
     *  with that.  The RTEMS_DEBUG code below is only a validation.
     */
    #if defined(RTEMS_DEBUG)
      switch ( the_mutex->Attributes.lock_nesting_behavior ) {
        case CORE_MUTEX_NESTING_ACQUIRES:
          _CORE_mutex_Release( the_mutex, queue_context );
          return STATUS_SUCCESSFUL;
        #if defined(RTEMS_POSIX_API)
          case CORE_MUTEX_NESTING_IS_ERROR:
            /* should never occur */
            _CORE_mutex_Release( the_mutex, queue_context );
            return STATUS_NESTING_NOT_ALLOWED;
        #endif
        case CORE_MUTEX_NESTING_BLOCKS:
          /* Currently no API exercises this behavior. */
          break;
      }
    #else
      _CORE_mutex_Release( the_mutex, queue_context );
      /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */
      return STATUS_SUCCESSFUL;
    #endif
  }

  /*
   *  Formally release the mutex before possibly transferring it to a
   *  blocked thread.
   */
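  /*
   *  Priority Inheritance and Priority Ceiling mutexes contribute to the
   *  holder's resource count, which tracks the priority-affecting
   *  resources the thread still holds.
   */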
  if ( _CORE_mutex_Is_inherit_priority( &the_mutex->Attributes ) ||
       _CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
    holder->resource_count--;
  }
  the_mutex->holder = NULL;

  /*
   *  Now we check if another thread was waiting for this mutex.  If so,
   *  transfer the mutex to that thread.
   */
  the_thread = _Thread_queue_First_locked(
    &the_mutex->Wait_queue,
    the_mutex->operations
  );

  if ( the_thread != NULL ) {
    bool unblock;

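    /*
     *  The first thread on the wait queue becomes the new holder with an
     *  initial nest count of one.
     */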
    the_mutex->holder     = the_thread;
    the_mutex->nest_count = 1;

    /*
     * We must extract the thread now since this will restore its default
     * thread lock.  This is necessary to avoid a deadlock in the
     * _Thread_Change_priority() below due to a recursive thread queue lock
     * acquire.
     */
    unblock = _Thread_queue_Extract_locked(
      &the_mutex->Wait_queue.Queue,
      the_mutex->operations,
      the_thread,
      queue_context
    );

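    /*
     *  Apply the discipline specific priority handling to the new holder.
     *  In multiprocessing configurations this is done only for local
     *  threads; a remote thread is represented here by a proxy and has
     *  its priority managed on its own node.
     */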
#if defined(RTEMS_MULTIPROCESSING)
    if ( _Objects_Is_local_id( the_thread->Object.id ) )
#endif
    {
      switch ( the_mutex->Attributes.discipline ) {
        case CORE_MUTEX_DISCIPLINES_FIFO:
        case CORE_MUTEX_DISCIPLINES_PRIORITY:
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
          the_thread->resource_count++;
          _Thread_queue_Boost_priority(
            &the_mutex->Wait_queue.Queue,
            the_thread
          );
          break;
        case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
          the_thread->resource_count++;
          _Thread_Raise_priority(
            the_thread,
            the_mutex->Attributes.priority_ceiling
          );
          break;
      }
    }

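    /*
     *  Unblock the new holder if necessary.  This also releases the wait
     *  queue lock acquired above.
     */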
    _Thread_queue_Unblock_critical(
      unblock,
      &the_mutex->Wait_queue.Queue,
      the_thread,
      &queue_context->Lock_context
    );
  } else {
    _CORE_mutex_Release( the_mutex, queue_context );
  }

  /*
   *  Whether or not someone is waiting for the mutex, any elevated
   *  priority of the holder must be restored if this was the last
   *  priority-affecting mutex (i.e. resource) this task held.
   */
  if ( !_Thread_Owns_resources( holder ) ) {
    /*
     * Ensure that the holder resource count is visible to all other processors
     * and that we read the latest priority restore hint.
     */
    _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );

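    /*
     *  The priority restore hint indicates that the current priority of
     *  the holder may deviate from its real priority, for example due to
     *  priority inheritance.  Restore it now that the holder no longer
     *  owns any priority-affecting resources.
     */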
    if ( holder->priority_restore_hint ) {
      Per_CPU_Control *cpu_self;

      cpu_self = _Thread_Dispatch_disable();
      _Thread_Restore_priority( holder );
      _Thread_Dispatch_enable( cpu_self );
    }
  }

  return STATUS_SUCCESSFUL;
}