author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2014-05-21 10:33:43 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2014-05-28 10:50:41 +0200
commit     8fcafdd553f3a564ecb5ab5093d01b29971418da
tree       7807b3a4ae28e62f0218f8e20051b1b7e0143206
parent     bsps/sparc: Change tabs to spaces.
score: Multiprocessor Resource Sharing Protocol
Add basic support for the Multiprocessor Resource Sharing Protocol (MrsP).

The Multiprocessor Resource Sharing Protocol (MrsP) is defined in A. Burns and A.J. Wellings, A Schedulability Compatible Multiprocessor Resource Sharing Protocol - MrsP, Proceedings of the 25th Euromicro Conference on Real-Time Systems (ECRTS 2013), July 2013. It is a generalization of the Priority Ceiling Protocol to SMP systems. Each MrsP semaphore uses a ceiling priority per scheduler instance. These ceiling priorities can be specified with rtems_semaphore_set_priority(). A task obtaining or owning a MrsP semaphore will execute with the ceiling priority for its scheduler instance as specified by the MrsP semaphore object. Tasks waiting to get ownership of a MrsP semaphore will not relinquish the processor voluntarily. In case the owner of a MrsP semaphore gets preempted it can ask all tasks waiting for this semaphore to help out and temporarily borrow the right to execute on one of their assigned processors.

The help out feature is not implemented with this patch.
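A minimal usage sketch (not part of the patch itself; distilled from the documentation example added to doc/user/sem.t below): a MrsP semaphore is created, given a separate ceiling priority for a second scheduler instance, and then obtained and released. The helper name and the scheduler_b_id parameter are illustrative; scheduler identifiers come from rtems_scheduler_ident().

    #include <assert.h>
    #include <rtems.h>

    /* Sketch: scheduler_b_id is assumed to identify a second scheduler
       instance obtained with rtems_scheduler_ident(). */
    static void mrsp_usage_sketch(rtems_id scheduler_b_id)
    {
      rtems_status_code sc;
      rtems_id sem_id;
      rtems_task_priority prio;

      /* Create a binary MrsP semaphore; the last argument (1) is the
         initial ceiling priority for all scheduler instances. */
      sc = rtems_semaphore_create(
        rtems_build_name('M', 'R', 'S', 'P'),
        1,
        RTEMS_MULTIPROCESSOR_RESOURCE_SHARING | RTEMS_BINARY_SEMAPHORE,
        1,
        &sem_id
      );
      assert(sc == RTEMS_SUCCESSFUL);

      /* Use a different ceiling priority for scheduler instance B */
      prio = 2;
      sc = rtems_semaphore_set_priority(sem_id, scheduler_b_id, prio, &prio);
      assert(sc == RTEMS_SUCCESSFUL);

      /* While owning the semaphore the task executes with the ceiling
         priority of its own scheduler instance. */
      sc = rtems_semaphore_obtain(sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
      assert(sc == RTEMS_SUCCESSFUL);

      sc = rtems_semaphore_release(sem_id);
      assert(sc == RTEMS_SUCCESSFUL);

      sc = rtems_semaphore_delete(sem_id);
      assert(sc == RTEMS_SUCCESSFUL);
    }

On uni-processor configurations the same code builds unchanged, since the MrsP attribute degrades to a Priority Ceiling mutex (see semcreate.c below).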
-rw-r--r--  cpukit/rtems/Makefile.am  1
-rw-r--r--  cpukit/rtems/include/rtems/rtems/attr.h  14
-rw-r--r--  cpukit/rtems/include/rtems/rtems/attrimpl.h  31
-rw-r--r--  cpukit/rtems/include/rtems/rtems/sem.h  46
-rw-r--r--  cpukit/rtems/include/rtems/rtems/semimpl.h  9
-rw-r--r--  cpukit/rtems/src/semcreate.c  38
-rw-r--r--  cpukit/rtems/src/semdelete.c  21
-rw-r--r--  cpukit/rtems/src/semflush.c  10
-rw-r--r--  cpukit/rtems/src/semobtain.c  19
-rw-r--r--  cpukit/rtems/src/semrelease.c  14
-rw-r--r--  cpukit/rtems/src/semsetpriority.c  116
-rw-r--r--  cpukit/sapi/include/confdefs.h  14
-rw-r--r--  cpukit/score/Makefile.am  2
-rw-r--r--  cpukit/score/include/rtems/score/mrsp.h  133
-rw-r--r--  cpukit/score/include/rtems/score/mrspimpl.h  283
-rw-r--r--  cpukit/score/include/rtems/score/schedulerimpl.h  10
-rw-r--r--  cpukit/score/preinstall.am  8
-rw-r--r--  doc/user/Makefile.am  2
-rw-r--r--  doc/user/conf.t  32
-rw-r--r--  doc/user/sem.t  213
-rw-r--r--  testsuites/smptests/Makefile.am  1
-rw-r--r--  testsuites/smptests/configure.ac  1
-rw-r--r--  testsuites/smptests/smpmrsp01/Makefile.am  19
-rw-r--r--  testsuites/smptests/smpmrsp01/init.c  698
-rw-r--r--  testsuites/smptests/smpmrsp01/smpmrsp01.doc  15
-rw-r--r--  testsuites/smptests/smpmrsp01/smpmrsp01.scn  147
-rw-r--r--  testsuites/sptests/Makefile.am  1
-rw-r--r--  testsuites/sptests/configure.ac  1
-rw-r--r--  testsuites/sptests/spmrsp01/Makefile.am  19
-rw-r--r--  testsuites/sptests/spmrsp01/init.c  329
-rw-r--r--  testsuites/sptests/spmrsp01/spmrsp01.doc  13
-rw-r--r--  testsuites/sptests/spmrsp01/spmrsp01.scn  7
32 files changed, 2251 insertions, 16 deletions
diff --git a/cpukit/rtems/Makefile.am b/cpukit/rtems/Makefile.am
index 4b84fa1343..eb9b16ee04 100644
--- a/cpukit/rtems/Makefile.am
+++ b/cpukit/rtems/Makefile.am
@@ -206,6 +206,7 @@ librtems_a_SOURCES += src/semrelease.c
librtems_a_SOURCES += src/semflush.c
librtems_a_SOURCES += src/semtranslatereturncode.c
librtems_a_SOURCES += src/semdata.c
+librtems_a_SOURCES += src/semsetpriority.c
## EVENT_C_FILES
librtems_a_SOURCES += src/event.c
diff --git a/cpukit/rtems/include/rtems/rtems/attr.h b/cpukit/rtems/include/rtems/rtems/attr.h
index d326539714..7e8fa4818a 100644
--- a/cpukit/rtems/include/rtems/rtems/attr.h
+++ b/cpukit/rtems/include/rtems/rtems/attr.h
@@ -139,6 +139,20 @@ typedef uint32_t rtems_attribute;
*/
#define RTEMS_PRIORITY_CEILING 0x00000080
+/**
+ * This attribute constant indicates that the Classic API Semaphore instance
+ * created will NOT use the Multiprocessor Resource Sharing Protocol.
+ */
+#define RTEMS_NO_MULTIPROCESSOR_RESOURCE_SHARING 0x00000000
+
+/**
+ * This attribute constant indicates that the Classic API Semaphore instance
+ * created will use the Multiprocessor Resource Sharing Protocol.
+ *
+ * @note The semaphore instance must be a binary semaphore.
+ */
+#define RTEMS_MULTIPROCESSOR_RESOURCE_SHARING 0x00000100
+
/******************** RTEMS Barrier Specific Attributes ********************/
/**
diff --git a/cpukit/rtems/include/rtems/rtems/attrimpl.h b/cpukit/rtems/include/rtems/rtems/attrimpl.h
index 0f78c44672..a32c37096d 100644
--- a/cpukit/rtems/include/rtems/rtems/attrimpl.h
+++ b/cpukit/rtems/include/rtems/rtems/attrimpl.h
@@ -185,6 +185,23 @@ RTEMS_INLINE_ROUTINE bool _Attributes_Is_inherit_priority(
}
/**
+ * @brief Returns true if the attribute set has at most one protocol, and false
+ * otherwise.
+ *
+ * The protocols are RTEMS_INHERIT_PRIORITY, RTEMS_PRIORITY_CEILING and
+ * RTEMS_MULTIPROCESSOR_RESOURCE_SHARING.
+ */
+RTEMS_INLINE_ROUTINE bool _Attributes_Has_at_most_one_protocol(
+ rtems_attribute attribute_set
+)
+{
+ attribute_set &= RTEMS_INHERIT_PRIORITY | RTEMS_PRIORITY_CEILING
+ | RTEMS_MULTIPROCESSOR_RESOURCE_SHARING;
+
+ return ( attribute_set & ( attribute_set - 1 ) ) == 0;
+}
+
+/**
* @brief Checks if the priority ceiling attribute
* is enabled in the attribute_set
*
@@ -199,6 +216,20 @@ RTEMS_INLINE_ROUTINE bool _Attributes_Is_priority_ceiling(
}
/**
+ * @brief Checks if the Multiprocessor Resource Sharing Protocol attribute
+ * is enabled in the attribute_set
+ *
+ * This function returns TRUE if the Multiprocessor Resource Sharing Protocol
+ * attribute is enabled in the attribute_set and FALSE otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Attributes_Is_multiprocessor_resource_sharing(
+ rtems_attribute attribute_set
+)
+{
+ return ( attribute_set & RTEMS_MULTIPROCESSOR_RESOURCE_SHARING ) != 0;
+}
+
+/**
* @brief Checks if the barrier automatic release
* attribute is enabled in the attribute_set
*
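The new _Attributes_Has_at_most_one_protocol() above uses the usual single-bit test: after masking, attribute_set & (attribute_set - 1) clears the lowest set bit, so the result is zero exactly when at most one protocol bit was set. A standalone sketch of the same idea, with made-up stand-in bit values rather than the real attribute constants:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PROTO_A 0x1u  /* stand-ins for RTEMS_INHERIT_PRIORITY,       */
    #define PROTO_B 0x2u  /* RTEMS_PRIORITY_CEILING and                  */
    #define PROTO_C 0x4u  /* RTEMS_MULTIPROCESSOR_RESOURCE_SHARING       */

    static bool has_at_most_one_protocol(uint32_t attribute_set)
    {
      attribute_set &= PROTO_A | PROTO_B | PROTO_C;

      /* Clearing the lowest set bit yields zero iff at most one bit was set */
      return (attribute_set & (attribute_set - 1)) == 0;
    }

    int main(void)
    {
      assert(has_at_most_one_protocol(0));                  /* no protocol */
      assert(has_at_most_one_protocol(PROTO_B));            /* exactly one */
      assert(has_at_most_one_protocol(PROTO_C | 0x8u));     /* other bits ignored */
      assert(!has_at_most_one_protocol(PROTO_A | PROTO_C)); /* two protocols */
      return 0;
    }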
diff --git a/cpukit/rtems/include/rtems/rtems/sem.h b/cpukit/rtems/include/rtems/rtems/sem.h
index 782314d163..2442010113 100644
--- a/cpukit/rtems/include/rtems/rtems/sem.h
+++ b/cpukit/rtems/include/rtems/rtems/sem.h
@@ -38,6 +38,7 @@
#include <rtems/score/coremutex.h>
#include <rtems/score/object.h>
#include <rtems/score/coresem.h>
+#include <rtems/score/mrsp.h>
#ifdef __cplusplus
extern "C" {
@@ -88,6 +89,10 @@ typedef struct {
* API Semaphore instance.
*/
CORE_semaphore_Control semaphore;
+
+#if defined(RTEMS_SMP)
+ MRSP_Control mrsp;
+#endif
} Core_control;
} Semaphore_Control;
@@ -208,6 +213,47 @@ rtems_status_code rtems_semaphore_flush(
rtems_id id
);
+/**
+ * @brief Sets the priority value of a semaphore with respect to the specified
+ * scheduler.
+ *
+ * The special priority value @ref RTEMS_CURRENT_PRIORITY can be used to get
+ * the current priority value without changing it.
+ *
+ * The interpretation of the priority value depends on the protocol of the
+ * semaphore object.
+ *
+ * - The Multiprocessor Resource Sharing Protocol needs a ceiling priority per
+ * scheduler instance. This operation can be used to specify these priority
+ * values.
+ * - For the Priority Ceiling Protocol the ceiling priority is used with this
+ * operation.
+ * - For other protocols this operation is not defined.
+ *
+ * @param[in] semaphore_id Identifier of the semaphore.
+ * @param[in] scheduler_id Identifier of the scheduler.
+ * @param[in] new_priority The new priority value. Use
+ * @ref RTEMS_CURRENT_PRIORITY to not set a new priority and only get the
+ * current priority.
+ * @param[out] old_priority Reference to store the old priority value.
+ *
+ * @retval RTEMS_SUCCESSFUL Successful operation.
+ * @retval RTEMS_INVALID_ID Invalid semaphore or scheduler identifier.
+ * @retval RTEMS_INVALID_ADDRESS The old priority reference is @c NULL.
+ * @retval RTEMS_INVALID_PRIORITY The new priority value is invalid.
+ * @retval RTEMS_NOT_DEFINED The set priority operation is not defined for the
+ * protocol of this semaphore object.
+ * @retval RTEMS_ILLEGAL_ON_REMOTE_OBJECT Not supported for remote semaphores.
+ *
+ * @see rtems_scheduler_ident() and rtems_task_set_priority().
+ */
+rtems_status_code rtems_semaphore_set_priority(
+ rtems_id semaphore_id,
+ rtems_id scheduler_id,
+ rtems_task_priority new_priority,
+ rtems_task_priority *old_priority
+);
+
/**@}*/
#ifdef __cplusplus
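A short sketch of the query-only use of rtems_semaphore_set_priority() described above: passing RTEMS_CURRENT_PRIORITY leaves the ceiling unchanged and only reports the current value. The helper name is illustrative and the semaphore and scheduler identifiers are assumed to be valid.

    #include <assert.h>
    #include <rtems.h>

    /* Read the ceiling priority of a semaphore for one scheduler instance
       without modifying it. */
    static rtems_task_priority get_ceiling_priority(
      rtems_id sem_id,
      rtems_id scheduler_id
    )
    {
      rtems_status_code sc;
      rtems_task_priority old_priority;

      sc = rtems_semaphore_set_priority(
        sem_id,
        scheduler_id,
        RTEMS_CURRENT_PRIORITY,  /* do not set a new ceiling */
        &old_priority
      );
      assert(sc == RTEMS_SUCCESSFUL);

      return old_priority;
    }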
diff --git a/cpukit/rtems/include/rtems/rtems/semimpl.h b/cpukit/rtems/include/rtems/rtems/semimpl.h
index 51da4cdbcf..e0a35a2e33 100644
--- a/cpukit/rtems/include/rtems/rtems/semimpl.h
+++ b/cpukit/rtems/include/rtems/rtems/semimpl.h
@@ -20,6 +20,7 @@
#include <rtems/rtems/sem.h>
#include <rtems/score/coremuteximpl.h>
#include <rtems/score/coresemimpl.h>
+#include <rtems/score/mrspimpl.h>
#ifdef __cplusplus
extern "C" {
@@ -92,6 +93,14 @@ _Semaphore_Translate_core_mutex_return_code(
return _Semaphore_Translate_core_mutex_return_code_[status];
}
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE rtems_status_code
+_Semaphore_Translate_MRSP_status_code( MRSP_Status mrsp_status )
+{
+ return (rtems_status_code) mrsp_status;
+}
+#endif
+
/**
* @brief Semaphore Translate Core Semaphore Return Code
*
diff --git a/cpukit/rtems/src/semcreate.c b/cpukit/rtems/src/semcreate.c
index fb597d1cbd..5e93e02632 100644
--- a/cpukit/rtems/src/semcreate.c
+++ b/cpukit/rtems/src/semcreate.c
@@ -78,12 +78,19 @@ rtems_status_code rtems_semaphore_create(
return RTEMS_MP_NOT_CONFIGURED;
if ( _Attributes_Is_inherit_priority( attribute_set ) ||
- _Attributes_Is_priority_ceiling( attribute_set ) )
+ _Attributes_Is_priority_ceiling( attribute_set ) ||
+ _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) )
return RTEMS_NOT_DEFINED;
} else
#endif
+ if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) &&
+ !( _Attributes_Is_binary_semaphore( attribute_set ) &&
+ !_Attributes_Is_priority( attribute_set ) ) ) {
+ return RTEMS_NOT_DEFINED;
+ }
+
if ( _Attributes_Is_inherit_priority( attribute_set ) ||
_Attributes_Is_priority_ceiling( attribute_set ) ) {
@@ -93,13 +100,22 @@ rtems_status_code rtems_semaphore_create(
}
- if ( _Attributes_Is_inherit_priority( attribute_set ) &&
- _Attributes_Is_priority_ceiling( attribute_set ) )
+ if ( !_Attributes_Has_at_most_one_protocol( attribute_set ) )
return RTEMS_NOT_DEFINED;
if ( !_Attributes_Is_counting_semaphore( attribute_set ) && ( count > 1 ) )
return RTEMS_INVALID_NUMBER;
+#if !defined(RTEMS_SMP)
+ /*
+ * On uni-processor configurations the Multiprocessor Resource Sharing
+ * Protocol is equivalent to the Priority Ceiling Protocol.
+ */
+ if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
+ attribute_set |= RTEMS_PRIORITY_CEILING | RTEMS_PRIORITY;
+ }
+#endif
+
the_semaphore = _Semaphore_Allocate();
if ( !the_semaphore ) {
@@ -144,6 +160,22 @@ rtems_status_code rtems_semaphore_create(
&the_semaphore_attr,
count
);
+#if defined(RTEMS_SMP)
+ } else if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
+ MRSP_Status mrsp_status = _MRSP_Initialize(
+ &the_semaphore->Core_control.mrsp,
+ priority_ceiling,
+ _Thread_Get_executing(),
+ count != 1
+ );
+
+ if ( mrsp_status != MRSP_SUCCESSFUL ) {
+ _Semaphore_Free( the_semaphore );
+ _Objects_Allocator_unlock();
+
+ return _Semaphore_Translate_MRSP_status_code( mrsp_status );
+ }
+#endif
} else {
/*
* It is either simple binary semaphore or a more powerful mutex
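The new checks above restrict MrsP semaphores to binary semaphores without a second locking protocol and without an initial lock. A sketch of two rejected combinations and the status codes a caller sees on SMP configurations (the helper and semaphore names are illustrative; compare the smpmrsp01 test below):

    #include <assert.h>
    #include <rtems.h>

    static void mrsp_create_error_sketch(void)
    {
      rtems_status_code sc;
      rtems_id id;

      /* Combining MrsP with another locking protocol is not defined */
      sc = rtems_semaphore_create(
        rtems_build_name('E', 'R', 'R', '1'),
        1,
        RTEMS_MULTIPROCESSOR_RESOURCE_SHARING | RTEMS_BINARY_SEMAPHORE
          | RTEMS_PRIORITY_CEILING,
        1,
        &id
      );
      assert(sc == RTEMS_NOT_DEFINED);

      /* An initially locked MrsP semaphore is rejected to avoid lock
         order reversal problems with the allocator mutex */
      sc = rtems_semaphore_create(
        rtems_build_name('E', 'R', 'R', '2'),
        0,
        RTEMS_MULTIPROCESSOR_RESOURCE_SHARING | RTEMS_BINARY_SEMAPHORE,
        1,
        &id
      );
      assert(sc == RTEMS_INVALID_NUMBER);
    }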
diff --git a/cpukit/rtems/src/semdelete.c b/cpukit/rtems/src/semdelete.c
index 6e7c5eafad..52bb14e33c 100644
--- a/cpukit/rtems/src/semdelete.c
+++ b/cpukit/rtems/src/semdelete.c
@@ -43,6 +43,7 @@ rtems_status_code rtems_semaphore_delete(
{
Semaphore_Control *the_semaphore;
Objects_Locations location;
+ rtems_attribute attribute_set;
_Objects_Allocator_lock();
@@ -50,10 +51,22 @@ rtems_status_code rtems_semaphore_delete(
switch ( location ) {
case OBJECTS_LOCAL:
- if ( !_Attributes_Is_counting_semaphore(the_semaphore->attribute_set) ) {
+ attribute_set = the_semaphore->attribute_set;
+#if defined(RTEMS_SMP)
+ if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
+ MRSP_Status mrsp_status = _MRSP_Destroy(
+ &the_semaphore->Core_control.mrsp
+ );
+ if ( mrsp_status != MRSP_SUCCESSFUL ) {
+ _Objects_Put( &the_semaphore->Object );
+ _Objects_Allocator_unlock();
+ return _Semaphore_Translate_MRSP_status_code( mrsp_status );
+ }
+ } else
+#endif
+ if ( !_Attributes_Is_counting_semaphore( attribute_set ) ) {
if ( _CORE_mutex_Is_locked( &the_semaphore->Core_control.mutex ) &&
- !_Attributes_Is_simple_binary_semaphore(
- the_semaphore->attribute_set ) ) {
+ !_Attributes_Is_simple_binary_semaphore( attribute_set ) ) {
_Objects_Put( &the_semaphore->Object );
_Objects_Allocator_unlock();
return RTEMS_RESOURCE_IN_USE;
@@ -74,7 +87,7 @@ rtems_status_code rtems_semaphore_delete(
_Objects_Close( &_Semaphore_Information, &the_semaphore->Object );
#if defined(RTEMS_MULTIPROCESSING)
- if ( _Attributes_Is_global( the_semaphore->attribute_set ) ) {
+ if ( _Attributes_Is_global( attribute_set ) ) {
_Objects_MP_Close( &_Semaphore_Information, the_semaphore->Object.id );
diff --git a/cpukit/rtems/src/semflush.c b/cpukit/rtems/src/semflush.c
index f73c92949d..b9b1ec6992 100644
--- a/cpukit/rtems/src/semflush.c
+++ b/cpukit/rtems/src/semflush.c
@@ -43,12 +43,20 @@ rtems_status_code rtems_semaphore_flush(
{
Semaphore_Control *the_semaphore;
Objects_Locations location;
+ rtems_attribute attribute_set;
the_semaphore = _Semaphore_Get( id, &location );
switch ( location ) {
case OBJECTS_LOCAL:
- if ( !_Attributes_Is_counting_semaphore(the_semaphore->attribute_set) ) {
+ attribute_set = the_semaphore->attribute_set;
+#if defined(RTEMS_SMP)
+ if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
+ _Objects_Put( &the_semaphore->Object );
+ return RTEMS_NOT_DEFINED;
+ } else
+#endif
+ if ( !_Attributes_Is_counting_semaphore( attribute_set ) ) {
_CORE_mutex_Flush(
&the_semaphore->Core_control.mutex,
SEND_OBJECT_WAS_DELETED,
diff --git a/cpukit/rtems/src/semobtain.c b/cpukit/rtems/src/semobtain.c
index c9433ca6d9..3608a00053 100644
--- a/cpukit/rtems/src/semobtain.c
+++ b/cpukit/rtems/src/semobtain.c
@@ -41,6 +41,7 @@ rtems_status_code rtems_semaphore_obtain(
Objects_Locations location;
ISR_Level level;
Thread_Control *executing;
+ rtems_attribute attribute_set;
bool wait;
the_semaphore = _Semaphore_Get_interrupt_disable( id, &location, &level );
@@ -48,8 +49,24 @@ rtems_status_code rtems_semaphore_obtain(
case OBJECTS_LOCAL:
executing = _Thread_Executing;
+ attribute_set = the_semaphore->attribute_set;
wait = !_Options_Is_no_wait( option_set );
- if ( !_Attributes_Is_counting_semaphore(the_semaphore->attribute_set) ) {
+#if defined(RTEMS_SMP)
+ if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
+ MRSP_Status mrsp_status;
+
+ _ISR_Enable( level );
+ mrsp_status = _MRSP_Obtain(
+ &the_semaphore->Core_control.mrsp,
+ executing,
+ wait,
+ timeout
+ );
+ _Objects_Put_for_get_isr_disable( &the_semaphore->Object );
+ return _Semaphore_Translate_MRSP_status_code( mrsp_status );
+ } else
+#endif
+ if ( !_Attributes_Is_counting_semaphore( attribute_set ) ) {
_CORE_mutex_Seize(
&the_semaphore->Core_control.mutex,
executing,
diff --git a/cpukit/rtems/src/semrelease.c b/cpukit/rtems/src/semrelease.c
index ff4e792b92..2c4be04c78 100644
--- a/cpukit/rtems/src/semrelease.c
+++ b/cpukit/rtems/src/semrelease.c
@@ -73,12 +73,24 @@ rtems_status_code rtems_semaphore_release(
Objects_Locations location;
CORE_mutex_Status mutex_status;
CORE_semaphore_Status semaphore_status;
+ rtems_attribute attribute_set;
the_semaphore = _Semaphore_Get( id, &location );
switch ( location ) {
case OBJECTS_LOCAL:
- if ( !_Attributes_Is_counting_semaphore(the_semaphore->attribute_set) ) {
+ attribute_set = the_semaphore->attribute_set;
+#if defined(RTEMS_SMP)
+ if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
+ MRSP_Status mrsp_status = _MRSP_Release(
+ &the_semaphore->Core_control.mrsp,
+ _Thread_Get_executing()
+ );
+ _Objects_Put( &the_semaphore->Object );
+ return _Semaphore_Translate_MRSP_status_code( mrsp_status );
+ } else
+#endif
+ if ( !_Attributes_Is_counting_semaphore( attribute_set ) ) {
mutex_status = _CORE_mutex_Surrender(
&the_semaphore->Core_control.mutex,
id,
diff --git a/cpukit/rtems/src/semsetpriority.c b/cpukit/rtems/src/semsetpriority.c
new file mode 100644
index 0000000000..b5dd1db101
--- /dev/null
+++ b/cpukit/rtems/src/semsetpriority.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+ #include "config.h"
+#endif
+
+#include <rtems/rtems/semimpl.h>
+#include <rtems/rtems/attrimpl.h>
+#include <rtems/rtems/tasksimpl.h>
+#include <rtems/score/schedulerimpl.h>
+
+static rtems_status_code _Semaphore_Set_priority(
+ Semaphore_Control *the_semaphore,
+ rtems_id scheduler_id,
+ rtems_task_priority new_priority,
+ rtems_task_priority *old_priority_p
+)
+{
+ rtems_status_code sc;
+ rtems_attribute attribute_set = the_semaphore->attribute_set;
+ rtems_task_priority old_priority;
+
+ new_priority = _RTEMS_tasks_Priority_to_Core( new_priority );
+
+#if defined(RTEMS_SMP)
+ if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
+ MRSP_Control *mrsp = &the_semaphore->Core_control.mrsp;
+ uint32_t scheduler_index = _Scheduler_Get_index_by_id( scheduler_id );
+
+ old_priority = _MRSP_Get_ceiling_priority( mrsp, scheduler_index );
+
+ if ( new_priority != RTEMS_CURRENT_PRIORITY ) {
+ _MRSP_Set_ceiling_priority( mrsp, scheduler_index, new_priority );
+ }
+
+ sc = RTEMS_SUCCESSFUL;
+ } else
+#endif
+ if ( _Attributes_Is_priority_ceiling( attribute_set ) ) {
+ CORE_mutex_Control *mutex = &the_semaphore->Core_control.mutex;
+
+ old_priority = mutex->Attributes.priority_ceiling;
+
+ if ( new_priority != RTEMS_CURRENT_PRIORITY ) {
+ mutex->Attributes.priority_ceiling = new_priority;
+ }
+
+ sc = RTEMS_SUCCESSFUL;
+ } else {
+ old_priority = 0;
+
+ sc = RTEMS_NOT_DEFINED;
+ }
+
+ *old_priority_p = _RTEMS_tasks_Priority_from_Core( old_priority );
+
+ _Objects_Put( &the_semaphore->Object );
+
+ return sc;
+}
+
+rtems_status_code rtems_semaphore_set_priority(
+ rtems_id semaphore_id,
+ rtems_id scheduler_id,
+ rtems_task_priority new_priority,
+ rtems_task_priority *old_priority
+)
+{
+ Semaphore_Control *the_semaphore;
+ Objects_Locations location;
+
+ if ( new_priority != RTEMS_CURRENT_PRIORITY &&
+ !_RTEMS_tasks_Priority_is_valid( new_priority ) ) {
+ return RTEMS_INVALID_PRIORITY;
+ }
+
+ if ( old_priority == NULL ) {
+ return RTEMS_INVALID_ADDRESS;
+ }
+
+ if ( !_Scheduler_Is_id_valid( scheduler_id ) ) {
+ return RTEMS_INVALID_ID;
+ }
+
+ the_semaphore = _Semaphore_Get( semaphore_id, &location );
+ switch ( location ) {
+ case OBJECTS_LOCAL:
+ return _Semaphore_Set_priority(
+ the_semaphore,
+ scheduler_id,
+ new_priority,
+ old_priority
+ );
+#if defined(RTEMS_MULTIPROCESSING)
+ case OBJECTS_REMOTE:
+ _Thread_Dispatch();
+ return RTEMS_ILLEGAL_ON_REMOTE_OBJECT;
+#endif
+ case OBJECTS_ERROR:
+ break;
+ }
+
+ return RTEMS_INVALID_ID;
+}
diff --git a/cpukit/sapi/include/confdefs.h b/cpukit/sapi/include/confdefs.h
index 1c97d153ba..6978277790 100644
--- a/cpukit/sapi/include/confdefs.h
+++ b/cpukit/sapi/include/confdefs.h
@@ -1792,6 +1792,17 @@ const rtems_libio_helper rtems_fs_init_helper =
CONFIGURE_SEMAPHORES_FOR_FILE_SYSTEMS + \
CONFIGURE_NETWORKING_SEMAPHORES)
+ #if !defined(RTEMS_SMP) || \
+ !defined(CONFIGURE_MAXIMUM_MRSP_SEMAPHORES)
+ #define CONFIGURE_MEMORY_FOR_MRSP_SEMAPHORES 0
+ #else
+ #define CONFIGURE_MEMORY_FOR_MRSP_SEMAPHORES \
+ CONFIGURE_MAXIMUM_MRSP_SEMAPHORES * \
+ _Configure_From_workspace( \
+ RTEMS_ARRAY_SIZE(_Scheduler_Table) * sizeof(Priority_Control) \
+ )
+ #endif
+
/*
* If there are no user or support semaphores defined, then we can assume
* that no memory need be allocated at all for semaphores.
@@ -1800,7 +1811,8 @@ const rtems_libio_helper rtems_fs_init_helper =
#define CONFIGURE_MEMORY_FOR_SEMAPHORES(_semaphores) 0
#else
#define CONFIGURE_MEMORY_FOR_SEMAPHORES(_semaphores) \
- _Configure_Object_RAM(_semaphores, sizeof(Semaphore_Control) )
+ _Configure_Object_RAM(_semaphores, sizeof(Semaphore_Control) ) + \
+ CONFIGURE_MEMORY_FOR_MRSP_SEMAPHORES
#endif
#ifndef CONFIGURE_MAXIMUM_MESSAGE_QUEUES
diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am
index 7c426025ea..b9a9f28514 100644
--- a/cpukit/score/Makefile.am
+++ b/cpukit/score/Makefile.am
@@ -39,6 +39,8 @@ include_rtems_score_HEADERS += include/rtems/score/isr.h
include_rtems_score_HEADERS += include/rtems/score/isrlevel.h
include_rtems_score_HEADERS += include/rtems/score/isrlock.h
include_rtems_score_HEADERS += include/rtems/score/freechain.h
+include_rtems_score_HEADERS += include/rtems/score/mrsp.h
+include_rtems_score_HEADERS += include/rtems/score/mrspimpl.h
include_rtems_score_HEADERS += include/rtems/score/object.h
include_rtems_score_HEADERS += include/rtems/score/objectimpl.h
include_rtems_score_HEADERS += include/rtems/score/onceimpl.h
diff --git a/cpukit/score/include/rtems/score/mrsp.h b/cpukit/score/include/rtems/score/mrsp.h
new file mode 100644
index 0000000000..407d5efecd
--- /dev/null
+++ b/cpukit/score/include/rtems/score/mrsp.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_MRSP_H
+#define _RTEMS_SCORE_MRSP_H
+
+#include <rtems/score/cpuopts.h>
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/atomic.h>
+#include <rtems/score/chain.h>
+#include <rtems/score/thread.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreMRSP Multiprocessor Resource Sharing Protocol Handler
+ *
+ * @ingroup Score
+ *
+ * @brief Multiprocessor Resource Sharing Protocol (MrsP).
+ *
+ * The Multiprocessor Resource Sharing Protocol (MrsP) is defined in A. Burns
+ * and A.J. Wellings, A Schedulability Compatible Multiprocessor Resource
+ * Sharing Protocol - MrsP, Proceedings of the 25th Euromicro Conference on
+ * Real-Time Systems (ECRTS 2013), July 2013. It is a generalization of the
+ * Priority Ceiling Protocol to SMP systems. Each MrsP semaphore uses a
+ * ceiling priority per scheduler instance. A task obtaining or owning a MrsP
+ * semaphore will execute with the ceiling priority for its scheduler instance
+ * as specified by the MrsP semaphore object. Tasks waiting to get ownership
+ * of a MrsP semaphore will not relinquish the processor voluntarily. In case
+ * the owner of a MrsP semaphore gets preempted it can ask all tasks waiting
+ * for this semaphore to help out and temporarily borrow the right to execute
+ * on one of their assigned processors.
+ *
+ * @{
+ */
+
+/**
+ * @brief MrsP status code.
+ *
+ * The values are chosen to directly map to RTEMS status codes. In case this
+ * implementation is used for other APIs, then for example the errno values can
+ * be added with a bit shift.
+ */
+typedef enum {
+ MRSP_SUCCESSFUL = 0,
+ MRSP_TIMEOUT = 6,
+ MRSP_INVALID_NUMBER = 10,
+ MRSP_RESOURCE_IN_USE = 12,
+ MRSP_UNSATISFIED = 13,
+ MRSP_INVALID_PRIORITY = 19,
+ MRSP_NOT_OWNER_OF_RESOURCE = 23,
+ MRSP_NO_MEMORY = 26
+} MRSP_Status;
+
+/**
+ * @brief MrsP rival.
+ *
+ * The rivals are used by threads waiting for resource ownership. They are
+ * registered in the MRSP control block.
+ */
+typedef struct {
+ /**
+ * @brief The node for registration in the MRSP rival chain.
+ *
+ * @see MRSP_Control::Rivals.
+ */
+ Chain_Node Node;
+
+ /**
+ * @brief Identification of the rival thread.
+ */
+ Thread_Control *thread;
+
+ /**
+ * @brief The rival state.
+ *
+ * Initially no state bits are set (MRSP_RIVAL_STATE_WAITING). The rival
+ * will busy wait until a state change happens. This can be
+ * MRSP_RIVAL_STATE_NEW_OWNER or MRSP_RIVAL_STATE_TIMEOUT.
+ */
+ Atomic_Uint state;
+} MRSP_Rival;
+
+/**
+ * @brief MrsP control block.
+ */
+typedef struct {
+ /**
+ * @brief The owner of the MRSP resource.
+ *
+ * In case this field is @c NULL, then this MRSP resource has currently no
+ * owner.
+ */
+ Thread_Control *owner;
+
+ /**
+ * @brief A chain of MrsP rivals waiting for resource ownership.
+ *
+ * @see MRSP_Rival::Node.
+ */
+ Chain_Control Rivals;
+
+ /**
+ * @brief One ceiling priority per scheduler instance.
+ */
+ Priority_Control *ceiling_priorities;
+} MRSP_Control;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SMP */
+
+#endif /* _RTEMS_SCORE_MRSP_H */
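The rival state machine documented above (a waiting task spins in MRSP_RIVAL_STATE_WAITING until the previous owner hands ownership over or the timeout watchdog fires) is implemented in mrspimpl.h below. As a simplified standalone model of that hand-off, using C11 atomics instead of the score atomic API (type and function names are illustrative, not RTEMS code):

    #include <stdatomic.h>

    #define RIVAL_STATE_WAITING   0x0u
    #define RIVAL_STATE_NEW_OWNER 0x1u
    #define RIVAL_STATE_TIMEOUT   0x2u

    typedef struct {
      atomic_uint state;
    } rival_model;

    /* Waiting task: keep the processor and spin until a state bit is set. */
    static unsigned int rival_busy_wait(rival_model *rival)
    {
      unsigned int state;

      do {
        state = atomic_load_explicit(&rival->state, memory_order_acquire);
      } while (state == RIVAL_STATE_WAITING);

      return state;
    }

    /* Previous owner on release (NEW_OWNER) or watchdog handler (TIMEOUT). */
    static void rival_add_state(rival_model *rival, unsigned int state)
    {
      atomic_fetch_or_explicit(&rival->state, state, memory_order_release);
    }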
diff --git a/cpukit/score/include/rtems/score/mrspimpl.h b/cpukit/score/include/rtems/score/mrspimpl.h
new file mode 100644
index 0000000000..76d3bc898d
--- /dev/null
+++ b/cpukit/score/include/rtems/score/mrspimpl.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_MRSPIMPL_H
+#define _RTEMS_SCORE_MRSPIMPL_H
+
+#include <rtems/score/mrsp.h>
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/assert.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/schedulerimpl.h>
+#include <rtems/score/watchdogimpl.h>
+#include <rtems/score/wkspace.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreMRSP
+ *
+ * @{
+ */
+
+#define MRSP_RIVAL_STATE_WAITING 0x0U
+
+#define MRSP_RIVAL_STATE_NEW_OWNER 0x1U
+
+#define MRSP_RIVAL_STATE_TIMEOUT 0x2U
+
+RTEMS_INLINE_ROUTINE void _MRSP_Claim_ownership(
+ MRSP_Control *mrsp,
+ Thread_Control *new_owner,
+ Priority_Control ceiling_priority
+)
+{
+ ++new_owner->resource_count;
+ mrsp->owner = new_owner;
+ _Thread_Change_priority( new_owner, ceiling_priority, false );
+}
+
+RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Initialize(
+ MRSP_Control *mrsp,
+ Priority_Control ceiling_priority,
+ Thread_Control *executing,
+ bool initially_locked
+)
+{
+ uint32_t scheduler_count = _Scheduler_Count;
+ uint32_t i;
+
+ if ( initially_locked ) {
+ return MRSP_INVALID_NUMBER;
+ }
+
+ mrsp->ceiling_priorities = _Workspace_Allocate(
+ sizeof( *mrsp->ceiling_priorities ) * scheduler_count
+ );
+ if ( mrsp->ceiling_priorities == NULL ) {
+ return MRSP_NO_MEMORY;
+ }
+
+ for ( i = 0 ; i < scheduler_count ; ++i ) {
+ mrsp->ceiling_priorities[ i ] = ceiling_priority;
+ }
+
+ mrsp->owner = NULL;
+ _Chain_Initialize_empty( &mrsp->Rivals );
+
+ return MRSP_SUCCESSFUL;
+}
+
+RTEMS_INLINE_ROUTINE Priority_Control _MRSP_Get_ceiling_priority(
+ MRSP_Control *mrsp,
+ uint32_t scheduler_index
+)
+{
+ return mrsp->ceiling_priorities[ scheduler_index ];
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Set_ceiling_priority(
+ MRSP_Control *mrsp,
+ uint32_t scheduler_index,
+ Priority_Control ceiling_priority
+)
+{
+ mrsp->ceiling_priorities[ scheduler_index ] = ceiling_priority;
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Restore_priority( Thread_Control *thread )
+{
+ _Thread_Change_priority( thread, thread->real_priority, true );
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Add_state(
+ MRSP_Rival *rival,
+ unsigned int state
+)
+{
+ _Atomic_Fetch_or_uint( &rival->state, state, ATOMIC_ORDER_RELEASE );
+}
+
+RTEMS_INLINE_ROUTINE void _MRSP_Timeout(
+ Objects_Id id,
+ void *arg
+)
+{
+ MRSP_Rival *rival = arg;
+
+ (void) id;
+
+ _MRSP_Add_state( rival, MRSP_RIVAL_STATE_TIMEOUT );
+}
+
+RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
+ MRSP_Control *mrsp,
+ Thread_Control *executing,
+ Priority_Control ceiling_priority,
+ Watchdog_Interval timeout
+)
+{
+ MRSP_Status status;
+ MRSP_Rival rival;
+ bool previous_life_protection;
+ unsigned int state;
+
+ _Thread_Change_priority( executing, ceiling_priority, false );
+
+ rival.thread = executing;
+ _Atomic_Init_uint( &rival.state, MRSP_RIVAL_STATE_WAITING );
+ _Chain_Append_unprotected( &mrsp->Rivals, &rival.Node );
+
+ if ( timeout > 0 ) {
+ _Watchdog_Initialize(
+ &executing->Timer,
+ _MRSP_Timeout,
+ 0,
+ &rival
+ );
+ _Watchdog_Insert_ticks( &executing->Timer, timeout );
+ }
+
+ previous_life_protection = _Thread_Set_life_protection( true );
+ _Thread_Enable_dispatch();
+
+ _Assert( _Debug_Is_thread_dispatching_allowed() );
+
+ while (
+ _Atomic_Load_uint( &rival.state, ATOMIC_ORDER_ACQUIRE )
+ == MRSP_RIVAL_STATE_WAITING
+ ) {
+ /* Wait for state change */
+ }
+
+ _Thread_Disable_dispatch();
+ _Thread_Set_life_protection( previous_life_protection );
+
+ if ( timeout > 0 ) {
+ _Watchdog_Remove( &executing->Timer );
+ }
+
+ _Chain_Extract_unprotected( &rival.Node );
+ state = _Atomic_Load_uint( &rival.state, ATOMIC_ORDER_RELAXED );
+
+ if ( ( state & MRSP_RIVAL_STATE_NEW_OWNER ) != 0 ) {
+ ++executing->resource_count;
+
+ status = MRSP_SUCCESSFUL;
+ } else {
+ if ( executing->resource_count == 0 ) {
+ _MRSP_Restore_priority( executing );
+ }
+
+ status = MRSP_TIMEOUT;
+ }
+
+ return status;
+}
+
+RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Obtain(
+ MRSP_Control *mrsp,
+ Thread_Control *executing,
+ bool wait,
+ Watchdog_Interval timeout
+)
+{
+ MRSP_Status status;
+ const Scheduler_Control *scheduler = _Scheduler_Get( executing );
+ uint32_t scheduler_index = _Scheduler_Get_index( scheduler );
+ Priority_Control ceiling_priority =
+ _MRSP_Get_ceiling_priority( mrsp, scheduler_index );
+ bool priority_ok = !_Scheduler_Is_priority_higher_than(
+ scheduler,
+ executing->current_priority,
+ ceiling_priority
+ );
+
+ if ( !priority_ok ) {
+ return MRSP_INVALID_PRIORITY;
+ }
+
+ if ( mrsp->owner == NULL ) {
+ _MRSP_Claim_ownership( mrsp, executing, ceiling_priority );
+ status = MRSP_SUCCESSFUL;
+ } else if ( mrsp->owner == executing ) {
+ status = MRSP_UNSATISFIED;
+ } else if ( wait ) {
+ status = _MRSP_Wait_for_ownership(
+ mrsp,
+ executing,
+ ceiling_priority,
+ timeout
+ );
+ } else {
+ status = MRSP_UNSATISFIED;
+ }
+
+ return status;
+}
+
+RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Release(
+ MRSP_Control *mrsp,
+ Thread_Control *executing
+)
+{
+ uint32_t resource_count = executing->resource_count;
+
+ if ( mrsp->owner != executing ) {
+ return MRSP_NOT_OWNER_OF_RESOURCE;
+ }
+
+ if ( resource_count == 1 ) {
+ executing->resource_count = 0;
+ _MRSP_Restore_priority( executing );
+ } else {
+ executing->resource_count = resource_count - 1;
+ }
+
+ if ( _Chain_Is_empty( &mrsp->Rivals ) ) {
+ mrsp->owner = NULL;
+ } else {
+ MRSP_Rival *rival = (MRSP_Rival *) _Chain_First( &mrsp->Rivals );
+
+ mrsp->owner = rival->thread;
+ _MRSP_Add_state( rival, MRSP_RIVAL_STATE_NEW_OWNER );
+ }
+
+ return MRSP_SUCCESSFUL;
+}
+
+RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Destroy( MRSP_Control *mrsp )
+{
+ if ( mrsp->owner != NULL ) {
+ return MRSP_RESOURCE_IN_USE;
+ }
+
+ _Workspace_Free( mrsp->ceiling_priorities );
+
+ return MRSP_SUCCESSFUL;
+}
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SMP */
+
+#endif /* _RTEMS_SCORE_MRSPIMPL_H */
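Two of the status paths in _MRSP_Obtain() above are visible to callers of rtems_semaphore_obtain(): nested access by the current owner is refused, and a task whose current priority is higher than the ceiling priority for its scheduler instance gets RTEMS_INVALID_PRIORITY. A sketch of the nested-access case, assuming sem_id identifies an unowned MrsP semaphore (compare test_mrsp_nested_obtain_error in the smpmrsp01 test below):

    #include <assert.h>
    #include <rtems.h>

    static void mrsp_nested_obtain_sketch(rtems_id sem_id)
    {
      rtems_status_code sc;

      sc = rtems_semaphore_obtain(sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
      assert(sc == RTEMS_SUCCESSFUL);

      /* A second obtain by the owner is not a recursive lock */
      sc = rtems_semaphore_obtain(sem_id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
      assert(sc == RTEMS_UNSATISFIED);

      sc = rtems_semaphore_release(sem_id);
      assert(sc == RTEMS_SUCCESSFUL);
    }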
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 15a4207cfd..be841642ca 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -617,6 +617,16 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
&& _Scheduler_Get_processor_count( scheduler ) > 0;
}
+RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
+{
+ const Scheduler_Control *scheduler;
+ bool ok = _Scheduler_Get_by_id( id, &scheduler );
+
+ (void) scheduler;
+
+ return ok;
+}
+
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
const Scheduler_Control *scheduler
)
diff --git a/cpukit/score/preinstall.am b/cpukit/score/preinstall.am
index d00e1374c6..45a0ce730f 100644
--- a/cpukit/score/preinstall.am
+++ b/cpukit/score/preinstall.am
@@ -139,6 +139,14 @@ $(PROJECT_INCLUDE)/rtems/score/freechain.h: include/rtems/score/freechain.h $(PR
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/freechain.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/freechain.h
+$(PROJECT_INCLUDE)/rtems/score/mrsp.h: include/rtems/score/mrsp.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+ $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/mrsp.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/mrsp.h
+
+$(PROJECT_INCLUDE)/rtems/score/mrspimpl.h: include/rtems/score/mrspimpl.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+ $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/mrspimpl.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/mrspimpl.h
+
$(PROJECT_INCLUDE)/rtems/score/object.h: include/rtems/score/object.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/object.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/object.h
diff --git a/doc/user/Makefile.am b/doc/user/Makefile.am
index ef91a41d63..e7743199df 100644
--- a/doc/user/Makefile.am
+++ b/doc/user/Makefile.am
@@ -96,7 +96,7 @@ sem.texi: sem.t
-n "Barrier Manager" < $< > $@
barrier.texi: barrier.t
- $(BMENU2) -p "Semaphore Manager SEMAPHORE_FLUSH - Unblock all tasks waiting on a semaphore" \
+ $(BMENU2) -p "Semaphore Manager SEMAPHORE_SET_PRIORITY - Set priority by scheduler for a semaphore" \
-u "Top" \
-n "Message Manager" < $< > $@
diff --git a/doc/user/conf.t b/doc/user/conf.t
index b518176455..d72ff06661 100644
--- a/doc/user/conf.t
+++ b/doc/user/conf.t
@@ -686,6 +686,38 @@ API Semaphores that can be concurrently active.
This object class can be configured in unlimited allocation mode.
@c
+@c === CONFIGURE_MAXIMUM_MRSP_SEMAPHORES ===
+@c
+@subsection Specify Maximum Classic API Semaphores usable with MrsP
+
+@findex CONFIGURE_MAXIMUM_MRSP_SEMAPHORES
+
+@table @b
+@item CONSTANT:
+@code{CONFIGURE_MAXIMUM_MRSP_SEMAPHORES}
+
+@item DATA TYPE:
+Unsigned integer (@code{uint32_t}).
+
+@item RANGE:
+Zero or positive.
+
+@item DEFAULT VALUE:
+The default value is 0.
+
+@end table
+
+@subheading DESCRIPTION:
+@code{CONFIGURE_MAXIMUM_MRSP_SEMAPHORES} is the
+maximum number of Classic API Semaphores using the Multiprocessor Resource
+Sharing Protocol (MrsP) that can be concurrently active.
+
+@subheading NOTES:
+This configuration option is only used on SMP configurations. On uni-processor
+configurations the Priority Ceiling Protocol is used for MrsP semaphores and
+thus no extra memory is necessary.
+
+@c
@c === CONFIGURE_MAXIMUM_MESSAGE_QUEUES ===
@c
@subsection Specify Maximum Classic API Message Queues
diff --git a/doc/user/sem.t b/doc/user/sem.t
index 9c380523ba..95fa4b90ae 100644
--- a/doc/user/sem.t
+++ b/doc/user/sem.t
@@ -24,6 +24,8 @@ semaphore manager are:
@item @code{@value{DIRPREFIX}semaphore_obtain} - Acquire a semaphore
@item @code{@value{DIRPREFIX}semaphore_release} - Release a semaphore
@item @code{@value{DIRPREFIX}semaphore_flush} - Unblock all tasks waiting on a semaphore
+@item @code{@value{DIRPREFIX}semaphore_set_priority} - Set priority by
+scheduler for a semaphore
@end itemize
@section Background
@@ -173,6 +175,22 @@ any of the semaphores the task holds. Only when the task
releases ALL of the binary semaphores it holds will its priority
be restored to the normal value.
+@subsection Multiprocessor Resource Sharing Protocol
+
+The Multiprocessor Resource Sharing Protocol (MrsP) is defined in @cite{A.
+Burns and A.J. Wellings, A Schedulability Compatible Multiprocessor Resource
+Sharing Protocol - MrsP, Proceedings of the 25th Euromicro Conference on
+Real-Time Systems (ECRTS 2013), July 2013}. It is a generalization of the
+Priority Ceiling Protocol to SMP systems. Each MrsP semaphore uses a ceiling
+priority per scheduler instance. These ceiling priorities can be specified
+with @code{rtems_semaphore_set_priority()}. A task obtaining or owning a MrsP
+semaphore will execute with the ceiling priority for its scheduler instance as
+specified by the MrsP semaphore object. Tasks waiting to get ownership of a
+MrsP semaphore will not relinquish the processor voluntarily. In case the
+owner of a MrsP semaphore gets preempted it can ask all tasks waiting for this
+semaphore to help out and temporarily borrow the right to execute on one of
+their assigned processors.
+
@subsection Building a Semaphore Attribute Set
In general, an attribute set is built by a bitwise OR
@@ -198,11 +216,17 @@ inheritance (default)
@item @code{@value{RPREFIX}INHERIT_PRIORITY} - use priority inheritance
-@item @code{@value{RPREFIX}PRIORITY_CEILING} - use priority ceiling
-
@item @code{@value{RPREFIX}NO_PRIORITY_CEILING} - do not use priority
ceiling (default)
+@item @code{@value{RPREFIX}PRIORITY_CEILING} - use priority ceiling
+
+@item @code{@value{RPREFIX}NO_MULTIPROCESSOR_RESOURCE_SHARING} - do not use
+Multiprocessor Resource Sharing Protocol (default)
+
+@item @code{@value{RPREFIX}MULTIPROCESSOR_RESOURCE_SHARING} - use
+Multiprocessor Resource Sharing Protocol
+
@item @code{@value{RPREFIX}LOCAL} - local semaphore (default)
@item @code{@value{RPREFIX}GLOBAL} - global semaphore
@@ -489,11 +513,17 @@ inheritance (default)
@item @code{@value{RPREFIX}INHERIT_PRIORITY} - use priority inheritance
-@item @code{@value{RPREFIX}PRIORITY_CEILING} - use priority ceiling
-
@item @code{@value{RPREFIX}NO_PRIORITY_CEILING} - do not use priority
ceiling (default)
+@item @code{@value{RPREFIX}PRIORITY_CEILING} - use priority ceiling
+
+@item @code{@value{RPREFIX}NO_MULTIPROCESSOR_RESOURCE_SHARING} - do not use
+Multiprocessor Resource Sharing Protocol (default)
+
+@item @code{@value{RPREFIX}MULTIPROCESSOR_RESOURCE_SHARING} - use
+Multiprocessor Resource Sharing Protocol
+
@item @code{@value{RPREFIX}LOCAL} - local semaphore (default)
@item @code{@value{RPREFIX}GLOBAL} - global semaphore
@@ -512,6 +542,11 @@ earlier discussion on this.
The total number of global objects, including semaphores, is limited by
the maximum_global_objects field in the Configuration Table.
+It is not allowed to create an initially locked MrsP semaphore. On SMP
+configurations the @code{@value{RPREFIX}INVALID_NUMBER} status code is
+returned in this case. This prevents lock order reversal problems with the
+allocator mutex.
+
@c
@c
@c
@@ -735,6 +770,10 @@ until the semaphore is released.
A clock tick is required to support the timeout functionality of
this directive.
+A task is not allowed to obtain a MrsP semaphore that it already owns (nested
+access). On SMP configurations the @code{@value{RPREFIX}UNSATISFIED} status
+code is returned in this case.
+
@c
@c
@c
@@ -831,6 +870,8 @@ procedure Semaphore_Flush (
@subheading DIRECTIVE STATUS CODES:
@code{@value{RPREFIX}SUCCESSFUL} - semaphore released successfully@*
@code{@value{RPREFIX}INVALID_ID} - invalid semaphore id@*
+@code{@value{RPREFIX}NOT_DEFINED} - operation not defined for the protocol of
+the semaphore@*
@code{@value{RPREFIX}ILLEGAL_ON_REMOTE_OBJECT} - not supported for remote semaphores
@subheading DESCRIPTION:
@@ -858,4 +899,168 @@ If the task to be unblocked resides on a different
node from the semaphore, then the waiting task is
unblocked, and the proxy used to represent the task is reclaimed.
+It is not allowed to flush a MrsP semaphore. On SMP
+configurations the @code{@value{RPREFIX}NOT_DEFINED} status code is returned
+in this case.
+
+@c
+@c
+@c
+@page
+@subsection SEMAPHORE_SET_PRIORITY - Set priority by scheduler for a semaphore
+
+@cindex set priority by scheduler for a semaphore
+
+@subheading CALLING SEQUENCE:
+
+@ifset is-C
+@findex rtems_semaphore_set_priority
+@example
+rtems_status_code rtems_semaphore_set_priority(
+ rtems_id semaphore_id,
+ rtems_id scheduler_id,
+ rtems_task_priority new_priority,
+ rtems_task_priority *old_priority
+);
+@end example
+@end ifset
+
+@subheading DIRECTIVE STATUS CODES:
+@code{@value{RPREFIX}SUCCESSFUL} - successful operation@*
+@code{@value{RPREFIX}INVALID_ID} - invalid semaphore or scheduler id@*
+@code{@value{RPREFIX}INVALID_ADDRESS} - @code{old_priority} is NULL@*
+@code{@value{RPREFIX}INVALID_PRIORITY} - invalid new priority value@*
+@code{@value{RPREFIX}NOT_DEFINED} - operation not defined for the protocol of
+the semaphore@*
+@code{@value{RPREFIX}ILLEGAL_ON_REMOTE_OBJECT} - not supported for remote semaphores
+
+@subheading DESCRIPTION:
+
+This directive sets the priority value of a semaphore with respect to the
+specified scheduler.
+
+The special priority value @code{RTEMS_CURRENT_PRIORITY} can be used to get the
+current priority value without changing it.
+
+The interpretation of the priority value depends on the protocol of the
+semaphore object.
+
+@itemize @bullet
+@item The Multiprocessor Resource Sharing Protocol needs a ceiling priority per
+scheduler instance. This operation can be used to specify these priority
+values.
+@item For the Priority Ceiling Protocol the ceiling priority is used with this
+operation.
+@item For other protocols this operation is not defined.
+@end itemize
+
+@subheading EXAMPLE:
+
+@example
+@group
+#include <assert.h>
+#include <stdlib.h>
+
+#include <rtems.h>
+
+#define SCHED_A rtems_build_name(' ', ' ', ' ', 'A')
+
+#define SCHED_B rtems_build_name(' ', ' ', ' ', 'B')
+
+static void Init(rtems_task_argument arg)
+@{
+ rtems_status_code sc;
+ rtems_id semaphore_id;
+ rtems_id scheduler_a_id;
+ rtems_id scheduler_b_id;
+ rtems_task_priority prio;
+
+ /* Get the scheduler identifiers */
+
+ sc = rtems_scheduler_ident(SCHED_A, &scheduler_a_id);
+ assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_scheduler_ident(SCHED_B, &scheduler_b_id);
+ assert(sc == RTEMS_SUCCESSFUL);
+
+ /* Create a MrsP semaphore object */
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &semaphore_id
+ );
+ assert(sc == RTEMS_SUCCESSFUL);
+
+ /*
+ * The ceiling priority values per scheduler are equal to the value specified
+ * for object creation.
+ */
+
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(semaphore_id, scheduler_a_id, prio, &prio);
+ assert(sc == RTEMS_SUCCESSFUL);
+ assert(prio == 1);
+
+ /* Check the old value and set a new ceiling priority for scheduler B */
+
+ prio = 2;
+ sc = rtems_semaphore_set_priority(semaphore_id, scheduler_b_id, prio, &prio);
+ assert(sc == RTEMS_SUCCESSFUL);
+ assert(prio == 1);
+ /* Check the ceiling priority values */
+
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(semaphore_id, scheduler_a_id, prio, &prio);
+ assert(sc == RTEMS_SUCCESSFUL);
+ assert(prio == 1);
+
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(semaphore_id, scheduler_b_id, prio, &prio);
+ assert(sc == RTEMS_SUCCESSFUL);
+ assert(prio == 2);
+
+ sc = rtems_semaphore_delete(semaphore_id);
+ assert(sc == RTEMS_SUCCESSFUL);
+
+ exit(0);
+@}
+
+#define CONFIGURE_SMP_APPLICATION
+
+#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
+
+#define CONFIGURE_MAXIMUM_TASKS 1
+#define CONFIGURE_MAXIMUM_SEMAPHORES 1
+#define CONFIGURE_MAXIMUM_MRSP_SEMAPHORES 1
+
+#define CONFIGURE_SMP_MAXIMUM_PROCESSORS 2
+
+#define CONFIGURE_SCHEDULER_SIMPLE_SMP
+
+#include <rtems/scheduler.h>
+
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(a);
+
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(b);
+
+#define CONFIGURE_SCHEDULER_CONTROLS \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(a, SCHED_A), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(b, SCHED_B)
+
+#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
+ RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
+ RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY)
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
+@end group
+@end example
diff --git a/testsuites/smptests/Makefile.am b/testsuites/smptests/Makefile.am
index 4a5a78b586..8c2ac7bb06 100644
--- a/testsuites/smptests/Makefile.am
+++ b/testsuites/smptests/Makefile.am
@@ -22,6 +22,7 @@ SUBDIRS += smpload01
SUBDIRS += smplock01
SUBDIRS += smpmigration01
SUBDIRS += smpmigration02
+SUBDIRS += smpmrsp01
SUBDIRS += smpscheduler01
SUBDIRS += smpscheduler02
SUBDIRS += smpscheduler03
diff --git a/testsuites/smptests/configure.ac b/testsuites/smptests/configure.ac
index 607adbb12f..bdc166b063 100644
--- a/testsuites/smptests/configure.ac
+++ b/testsuites/smptests/configure.ac
@@ -77,6 +77,7 @@ smpload01/Makefile
smplock01/Makefile
smpmigration01/Makefile
smpmigration02/Makefile
+smpmrsp01/Makefile
smppsxaffinity01/Makefile
smppsxaffinity02/Makefile
smppsxsignal01/Makefile
diff --git a/testsuites/smptests/smpmrsp01/Makefile.am b/testsuites/smptests/smpmrsp01/Makefile.am
new file mode 100644
index 0000000000..7983434b98
--- /dev/null
+++ b/testsuites/smptests/smpmrsp01/Makefile.am
@@ -0,0 +1,19 @@
+rtems_tests_PROGRAMS = smpmrsp01
+smpmrsp01_SOURCES = init.c
+
+dist_rtems_tests_DATA = smpmrsp01.scn smpmrsp01.doc
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(top_srcdir)/../automake/compile.am
+include $(top_srcdir)/../automake/leaf.am
+
+AM_CPPFLAGS += -I$(top_srcdir)/../support/include
+
+LINK_OBJS = $(smpmrsp01_OBJECTS)
+LINK_LIBS = $(smpmrsp01_LDLIBS)
+
+smpmrsp01$(EXEEXT): $(smpmrsp01_OBJECTS) $(smpmrsp01_DEPENDENCIES)
+ @rm -f smpmrsp01$(EXEEXT)
+ $(make-exe)
+
+include $(top_srcdir)/../automake/local.am
diff --git a/testsuites/smptests/smpmrsp01/init.c b/testsuites/smptests/smpmrsp01/init.c
new file mode 100644
index 0000000000..4f6637afac
--- /dev/null
+++ b/testsuites/smptests/smpmrsp01/init.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+ #include "config.h"
+#endif
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include <rtems.h>
+#include <rtems/libcsupport.h>
+#include <rtems/score/smpbarrier.h>
+
+#define TESTS_USE_PRINTK
+#include "tmacros.h"
+
+const char rtems_test_name[] = "SMPMRSP 1";
+
+#define CPU_COUNT 32
+
+#define MRSP_COUNT 32
+
+typedef struct {
+ uint32_t sleep;
+ uint32_t timeout;
+ uint32_t obtain[MRSP_COUNT];
+} counter;
+
+typedef struct {
+ rtems_id main_task_id;
+ rtems_id counting_sem_id;
+ rtems_id mrsp_ids[MRSP_COUNT];
+ rtems_id scheduler_ids[CPU_COUNT];
+ rtems_id worker_ids[2 * CPU_COUNT];
+ volatile bool stop_worker[CPU_COUNT];
+ counter counters[2 * CPU_COUNT];
+ Thread_Control *worker_task;
+ SMP_barrier_Control barrier;
+} test_context;
+
+static test_context test_instance = {
+ .barrier = SMP_BARRIER_CONTROL_INITIALIZER
+};
+
+static void barrier(test_context *ctx, SMP_barrier_State *bs)
+{
+ _SMP_barrier_Wait(&ctx->barrier, bs, 2);
+}
+
+static void assert_prio(rtems_id task_id, rtems_task_priority expected_prio)
+{
+ rtems_status_code sc;
+ rtems_task_priority prio;
+
+ sc = rtems_task_set_priority(task_id, RTEMS_CURRENT_PRIORITY, &prio);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == expected_prio);
+}
+
+static void change_prio(rtems_id task_id, rtems_task_priority prio)
+{
+ rtems_status_code sc;
+
+ sc = rtems_task_set_priority(task_id, prio, &prio);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void assert_executing_worker(test_context *ctx)
+{
+ rtems_test_assert(
+ _CPU_Context_Get_is_executing(&ctx->worker_task->Registers)
+ );
+}
+
+static void obtain_and_release_worker(rtems_task_argument arg)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+ ctx->worker_task = _Thread_Get_executing();
+
+ assert_prio(RTEMS_SELF, 3);
+
+ /* Obtain with timeout (A) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
+ rtems_test_assert(sc == RTEMS_TIMEOUT);
+
+ assert_prio(RTEMS_SELF, 3);
+
+ /* Obtain with priority change and timeout (B) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, 4);
+ rtems_test_assert(sc == RTEMS_TIMEOUT);
+
+ assert_prio(RTEMS_SELF, 1);
+
+ /* Restore priority (C) */
+ barrier(ctx, &barrier_state);
+
+ /* Obtain without timeout (D) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 2);
+
+ sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 3);
+
+ /* Worker done (E) */
+ barrier(ctx, &barrier_state);
+
+ rtems_task_suspend(RTEMS_SELF);
+ rtems_test_assert(0);
+}
+
+static void test_mrsp_obtain_and_release(void)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ rtems_task_priority prio;
+ rtems_id scheduler_id;
+ SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER;
+
+ puts("test MrsP obtain and release");
+
+ /* Check executing task parameters */
+
+ sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ rtems_test_assert(ctx->scheduler_ids[0] == scheduler_id);
+
+ assert_prio(RTEMS_SELF, 2);
+
+ /* Create a MrsP semaphore object and lock it */
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &ctx->mrsp_ids[0]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 2);
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[0], RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(RTEMS_SELF, 1);
+
+ /*
+ * The ceiling priority values per scheduler are equal to the value specified
+ * for object creation.
+ */
+
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(
+ ctx->mrsp_ids[0],
+ ctx->scheduler_ids[0],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 1);
+
+ /* Check the old value and set a new ceiling priority for scheduler B */
+
+ prio = 2;
+ sc = rtems_semaphore_set_priority(
+ ctx->mrsp_ids[0],
+ ctx->scheduler_ids[1],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 1);
+
+ /* Check the ceiling priority values */
+
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(
+ ctx->mrsp_ids[0],
+ ctx->scheduler_ids[0],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 1);
+
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(
+ ctx->mrsp_ids[0],
+ ctx->scheduler_ids[1],
+ prio,
+ &prio
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 2);
+
+ /* Check that a thread waiting to get ownership remains executing */
+
+ sc = rtems_task_create(
+ rtems_build_name('W', 'O', 'R', 'K'),
+ 3,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->worker_ids[0]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(ctx->worker_ids[0], ctx->scheduler_ids[1]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(ctx->worker_ids[0], obtain_and_release_worker, 0);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ /* Obtain with timeout (A) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_task_wake_after(2);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(ctx->worker_ids[0], 2);
+ assert_executing_worker(ctx);
+
+ /* Obtain with priority change and timeout (B) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_task_wake_after(2);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(ctx->worker_ids[0], 2);
+ change_prio(ctx->worker_ids[0], 1);
+ assert_executing_worker(ctx);
+
+ /* Restore priority (C) */
+ barrier(ctx, &barrier_state);
+
+ assert_prio(ctx->worker_ids[0], 1);
+ change_prio(ctx->worker_ids[0], 3);
+
+ /* Obtain without timeout (D) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_task_wake_after(2);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(ctx->worker_ids[0], 2);
+ assert_executing_worker(ctx);
+
+ sc = rtems_semaphore_release(ctx->mrsp_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ /* Worker done (E) */
+ barrier(ctx, &barrier_state);
+
+ sc = rtems_task_delete(ctx->worker_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_delete(ctx->mrsp_ids[0]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test_mrsp_flush_error(void)
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ puts("test MrsP flush error");
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_flush(id);
+ rtems_test_assert(sc == RTEMS_NOT_DEFINED);
+
+ sc = rtems_semaphore_delete(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test_mrsp_initially_locked_error(void)
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ puts("test MrsP initially locked error");
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 0,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
+}
+
+static void test_mrsp_nested_obtain_error(void)
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ puts("test MrsP nested obtain error");
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_UNSATISFIED);
+
+ sc = rtems_semaphore_release(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_delete(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static uint32_t simple_random(uint32_t v)
+{
+ v *= 1664525;
+ v += 1013904223;
+
+ return v;
+}
+
+static rtems_interval timeout(uint32_t v)
+{
+ return (v >> 23) % 4;
+}
+
+static void load_worker(rtems_task_argument index)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ uint32_t v = index;
+
+ while (!ctx->stop_worker[index]) {
+ uint32_t i = (v >> 13) % MRSP_COUNT;
+
+ assert_prio(RTEMS_SELF, 3 + CPU_COUNT + index);
+
+ if ((v >> 7) % 1024 == 0) {
+ /* Give some time to the lower priority tasks */
+
+ ++ctx->counters[index].sleep;
+
+ sc = rtems_task_wake_after(1);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ } else {
+ uint32_t n = (v >> 17) % (i + 1);
+ uint32_t s;
+ uint32_t t;
+
+ /* Nested obtain */
+ for (s = 0; s <= n; ++s) {
+ uint32_t k = i - s;
+
+ sc = rtems_semaphore_obtain(ctx->mrsp_ids[k], RTEMS_WAIT, timeout(v));
+ if (sc == RTEMS_SUCCESSFUL) {
+ ++ctx->counters[index].obtain[n];
+
+ assert_prio(RTEMS_SELF, 3 + k);
+ } else {
+ rtems_test_assert(sc == RTEMS_TIMEOUT);
+
+ ++ctx->counters[index].timeout;
+
+ break;
+ }
+
+ v = simple_random(v);
+ }
+
+ /* Release in reverse obtain order */
+ for (t = 0; t < s; ++t) {
+ uint32_t k = i + t - s + 1;
+
+ sc = rtems_semaphore_release(ctx->mrsp_ids[k]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+ }
+
+ v = simple_random(v);
+ }
+
+ sc = rtems_semaphore_release(ctx->counting_sem_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ rtems_task_suspend(RTEMS_SELF);
+ rtems_test_assert(0);
+}
+
+static void test_mrsp_load(void)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ uint32_t cpu_count = rtems_get_processor_count();
+ uint32_t index;
+
+ puts("test MrsP load");
+
+ assert_prio(RTEMS_SELF, 2);
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('S', 'Y', 'N', 'C'),
+ 0,
+ RTEMS_COUNTING_SEMAPHORE,
+ 0,
+ &ctx->counting_sem_id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ for (index = 0; index < MRSP_COUNT; ++index) {
+ sc = rtems_semaphore_create(
+ 'A' + index,
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 3 + index,
+ &ctx->mrsp_ids[index]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ for (index = 0; index < cpu_count; ++index) {
+ uint32_t a = 2 * index;
+ uint32_t b = a + 1;
+
+ sc = rtems_task_create(
+ 'A' + a,
+ 3 + MRSP_COUNT + a,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->worker_ids[a]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(
+ ctx->worker_ids[a],
+ ctx->scheduler_ids[index]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(
+ ctx->worker_ids[a],
+ load_worker,
+ (rtems_task_argument) a
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_create(
+ 'A' + b,
+ 3 + MRSP_COUNT + b,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &ctx->worker_ids[b]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_set_scheduler(
+ ctx->worker_ids[b],
+ ctx->scheduler_ids[index]
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_start(
+ ctx->worker_ids[b],
+ load_worker,
+ (rtems_task_argument) b
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ sc = rtems_task_wake_after(30 * rtems_clock_get_ticks_per_second());
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ for (index = 0; index < 2 * cpu_count; ++index) {
+ ctx->stop_worker[index] = true;
+ }
+
+ for (index = 0; index < 2 * cpu_count; ++index) {
+ sc = rtems_semaphore_obtain(
+ ctx->counting_sem_id,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ for (index = 0; index < 2 * cpu_count; ++index) {
+ sc = rtems_task_delete(ctx->worker_ids[index]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ for (index = 0; index < MRSP_COUNT; ++index) {
+ sc = rtems_semaphore_delete(ctx->mrsp_ids[index]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ sc = rtems_semaphore_delete(ctx->counting_sem_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ for (index = 0; index < 2 * cpu_count; ++index) {
+ uint32_t nest_level;
+
+ printf(
+ "worker[%" PRIu32 "][%" PRIu32 "]\n"
+ " sleep = %" PRIu32 "\n"
+ " timeout = %" PRIu32 "\n",
+ index / 2,
+ index % 2,
+ ctx->counters[index].sleep,
+ ctx->counters[index].timeout
+ );
+
+ for (nest_level = 0; nest_level < MRSP_COUNT; ++nest_level) {
+ printf(
+ " obtain[%" PRIu32 "] = %" PRIu32 "\n",
+ nest_level,
+ ctx->counters[index].obtain[nest_level]
+ );
+ }
+ }
+}
+
+static void Init(rtems_task_argument arg)
+{
+ test_context *ctx = &test_instance;
+ rtems_status_code sc;
+ rtems_resource_snapshot snapshot;
+ uint32_t cpu_count = rtems_get_processor_count();
+ uint32_t cpu_index;
+
+ TEST_BEGIN();
+
+ rtems_resource_snapshot_take(&snapshot);
+
+ ctx->main_task_id = rtems_task_self();
+
+ for (cpu_index = 0; cpu_index < cpu_count; ++cpu_index) {
+ sc = rtems_scheduler_ident(cpu_index, &ctx->scheduler_ids[cpu_index]);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ }
+
+ test_mrsp_flush_error();
+ test_mrsp_initially_locked_error();
+ test_mrsp_nested_obtain_error();
+ test_mrsp_obtain_and_release();
+ test_mrsp_load();
+
+ rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
+
+ TEST_END();
+ rtems_test_exit(0);
+}
+
+#define CONFIGURE_SMP_APPLICATION
+
+#define CONFIGURE_MICROSECONDS_PER_TICK 1000
+
+#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
+
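+/*
+ * MrsP semaphores are accounted separately from ordinary semaphores via
+ * CONFIGURE_MAXIMUM_MRSP_SEMAPHORES, presumably because their per
+ * scheduler instance ceiling priorities make the object size depend on
+ * the configured scheduler count.
+ */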
+#define CONFIGURE_MAXIMUM_TASKS (2 * CPU_COUNT + 1)
+#define CONFIGURE_MAXIMUM_SEMAPHORES (MRSP_COUNT + 1)
+#define CONFIGURE_MAXIMUM_MRSP_SEMAPHORES MRSP_COUNT
+#define CONFIGURE_MAXIMUM_TIMERS 1
+
+#define CONFIGURE_SMP_MAXIMUM_PROCESSORS CPU_COUNT
+
+#define CONFIGURE_SCHEDULER_SIMPLE_SMP
+
+#include <rtems/scheduler.h>
+
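+/*
+ * Seventeen simple SMP scheduler instances: instances 0 and 1 each get
+ * one mandatory processor, instances 2 to 16 each get up to two optional
+ * processors, one assignment per configured processor.
+ */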
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(0);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(1);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(2);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(3);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(4);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(5);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(6);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(7);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(8);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(9);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(10);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(11);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(12);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(13);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(14);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(15);
+RTEMS_SCHEDULER_CONTEXT_SIMPLE_SMP(16);
+
+#define CONFIGURE_SCHEDULER_CONTROLS \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(0, 0), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(1, 1), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(2, 2), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(3, 3), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(4, 4), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(5, 5), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(6, 6), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(7, 7), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(8, 8), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(9, 9), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(10, 10), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(11, 11), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(12, 12), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(13, 13), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(14, 14), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(15, 15), \
+ RTEMS_SCHEDULER_CONTROL_SIMPLE_SMP(16, 16)
+
+#define CONFIGURE_SMP_SCHEDULER_ASSIGNMENTS \
+ RTEMS_SCHEDULER_ASSIGN(0, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
+ RTEMS_SCHEDULER_ASSIGN(1, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_MANDATORY), \
+ RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(2, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(3, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(4, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(5, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(6, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(7, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(8, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(9, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(10, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(11, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(12, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(13, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(14, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(15, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL), \
+ RTEMS_SCHEDULER_ASSIGN(16, RTEMS_SCHEDULER_ASSIGN_PROCESSOR_OPTIONAL)
+
+#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
+
+#define CONFIGURE_INIT_TASK_PRIORITY 2
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
diff --git a/testsuites/smptests/smpmrsp01/smpmrsp01.doc b/testsuites/smptests/smpmrsp01/smpmrsp01.doc
new file mode 100644
index 0000000000..85badfd8e5
--- /dev/null
+++ b/testsuites/smptests/smpmrsp01/smpmrsp01.doc
@@ -0,0 +1,15 @@
+This file describes the directives and concepts tested by this test set.
+
+test set name: smpmrsp01
+
+directives:
+
+ - _MRSP_Initialize()
+ - _MRSP_Obtain()
+ - _MRSP_Release()
+ - _MRSP_Get_ceiling_priority()
+ - _MRSP_Set_ceiling_priority()
+
+concepts:
+
+  - Ensure that MrsP semaphore error conditions (flush, initially locked,
+    nested obtain), obtain and release, and behaviour under parallel load
+    work as expected on SMP configurations.
diff --git a/testsuites/smptests/smpmrsp01/smpmrsp01.scn b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
new file mode 100644
index 0000000000..5762a02c8c
--- /dev/null
+++ b/testsuites/smptests/smpmrsp01/smpmrsp01.scn
@@ -0,0 +1,147 @@
+*** BEGIN OF TEST SMPMRSP 1 ***
+test MrsP flush error
+test MrsP initially locked error
+test MrsP nested obtain error
+test MrsP obtain and release
+test MrsP load
+worker[0][0]
+ sleep = 890
+ timeout = 1455
+ obtain[0] = 141069
+ obtain[1] = 111062
+ obtain[2] = 255631
+ obtain[3] = 186559
+ obtain[4] = 310707
+ obtain[5] = 246838
+ obtain[6] = 331853
+ obtain[7] = 298938
+ obtain[8] = 331989
+ obtain[9] = 343041
+ obtain[10] = 310191
+ obtain[11] = 381001
+ obtain[12] = 269001
+ obtain[13] = 412849
+ obtain[14] = 217768
+ obtain[15] = 444036
+ obtain[16] = 160721
+ obtain[17] = 476211
+ obtain[18] = 151929
+ obtain[19] = 438664
+ obtain[20] = 132708
+ obtain[21] = 388090
+ obtain[22] = 118166
+ obtain[23] = 337468
+ obtain[24] = 96676
+ obtain[25] = 271392
+ obtain[26] = 75445
+ obtain[27] = 203259
+ obtain[28] = 52933
+ obtain[29] = 132769
+ obtain[30] = 27856
+ obtain[31] = 57014
+worker[0][1]
+ sleep = 15
+ timeout = 33
+ obtain[0] = 2241
+ obtain[1] = 1890
+ obtain[2] = 4128
+ obtain[3] = 3128
+ obtain[4] = 5110
+ obtain[5] = 3981
+ obtain[6] = 5348
+ obtain[7] = 4825
+ obtain[8] = 5184
+ obtain[9] = 5720
+ obtain[10] = 4488
+ obtain[11] = 6038
+ obtain[12] = 4095
+ obtain[13] = 6658
+ obtain[14] = 3754
+ obtain[15] = 6768
+ obtain[16] = 2654
+ obtain[17] = 7051
+ obtain[18] = 2679
+ obtain[19] = 6956
+ obtain[20] = 2498
+ obtain[21] = 6173
+ obtain[22] = 2024
+ obtain[23] = 5514
+ obtain[24] = 1650
+ obtain[25] = 4141
+ obtain[26] = 1568
+ obtain[27] = 3285
+ obtain[28] = 812
+ obtain[29] = 2317
+ obtain[30] = 527
+ obtain[31] = 996
+worker[1][0]
+ sleep = 890
+ timeout = 1581
+ obtain[0] = 140732
+ obtain[1] = 111655
+ obtain[2] = 256936
+ obtain[3] = 186534
+ obtain[4] = 311714
+ obtain[5] = 248065
+ obtain[6] = 333155
+ obtain[7] = 300734
+ obtain[8] = 329675
+ obtain[9] = 343832
+ obtain[10] = 309112
+ obtain[11] = 380452
+ obtain[12] = 270156
+ obtain[13] = 416600
+ obtain[14] = 223484
+ obtain[15] = 444991
+ obtain[16] = 163750
+ obtain[17] = 476096
+ obtain[18] = 150317
+ obtain[19] = 432827
+ obtain[20] = 133946
+ obtain[21] = 388441
+ obtain[22] = 119760
+ obtain[23] = 337033
+ obtain[24] = 99153
+ obtain[25] = 271558
+ obtain[26] = 77535
+ obtain[27] = 202607
+ obtain[28] = 53225
+ obtain[29] = 130801
+ obtain[30] = 27321
+ obtain[31] = 56239
+worker[1][1]
+ sleep = 25
+ timeout = 48
+ obtain[0] = 2164
+ obtain[1] = 1722
+ obtain[2] = 4095
+ obtain[3] = 3002
+ obtain[4] = 4950
+ obtain[5] = 4020
+ obtain[6] = 5235
+ obtain[7] = 4716
+ obtain[8] = 5407
+ obtain[9] = 5070
+ obtain[10] = 5082
+ obtain[11] = 6130
+ obtain[12] = 4368
+ obtain[13] = 6108
+ obtain[14] = 3270
+ obtain[15] = 6800
+ obtain[16] = 2652
+ obtain[17] = 7633
+ obtain[18] = 2451
+ obtain[19] = 7480
+ obtain[20] = 2079
+ obtain[21] = 6232
+ obtain[22] = 1590
+ obtain[23] = 5739
+ obtain[24] = 1627
+ obtain[25] = 4030
+ obtain[26] = 1296
+ obtain[27] = 2803
+ obtain[28] = 969
+ obtain[29] = 2253
+ obtain[30] = 217
+ obtain[31] = 930
+*** END OF TEST SMPMRSP 1 ***
diff --git a/testsuites/sptests/Makefile.am b/testsuites/sptests/Makefile.am
index ef25b25568..cc5ed2628c 100644
--- a/testsuites/sptests/Makefile.am
+++ b/testsuites/sptests/Makefile.am
@@ -37,6 +37,7 @@ if HAS_SMP
else
_SUBDIRS += sp29
endif
+_SUBDIRS += spmrsp01
_SUBDIRS += spscheduler01
_SUBDIRS += spprofiling01
_SUBDIRS += spfatal28
diff --git a/testsuites/sptests/configure.ac b/testsuites/sptests/configure.ac
index b40d4dd6fd..ebc81ed94d 100644
--- a/testsuites/sptests/configure.ac
+++ b/testsuites/sptests/configure.ac
@@ -40,6 +40,7 @@ AM_CONDITIONAL(HAS_SMP,test "$rtems_cv_RTEMS_SMP" = "yes")
# Explicitly list all Makefiles here
AC_CONFIG_FILES([Makefile
+spmrsp01/Makefile
spscheduler01/Makefile
spfatal28/Makefile
spthreadlife01/Makefile
diff --git a/testsuites/sptests/spmrsp01/Makefile.am b/testsuites/sptests/spmrsp01/Makefile.am
new file mode 100644
index 0000000000..122b2a9018
--- /dev/null
+++ b/testsuites/sptests/spmrsp01/Makefile.am
@@ -0,0 +1,19 @@
+rtems_tests_PROGRAMS = spmrsp01
+spmrsp01_SOURCES = init.c
+
+dist_rtems_tests_DATA = spmrsp01.scn spmrsp01.doc
+
+include $(RTEMS_ROOT)/make/custom/@RTEMS_BSP@.cfg
+include $(top_srcdir)/../automake/compile.am
+include $(top_srcdir)/../automake/leaf.am
+
+AM_CPPFLAGS += -I$(top_srcdir)/../support/include
+
+LINK_OBJS = $(spmrsp01_OBJECTS)
+LINK_LIBS = $(spmrsp01_LDLIBS)
+
+spmrsp01$(EXEEXT): $(spmrsp01_OBJECTS) $(spmrsp01_DEPENDENCIES)
+ @rm -f spmrsp01$(EXEEXT)
+ $(make-exe)
+
+include $(top_srcdir)/../automake/local.am
diff --git a/testsuites/sptests/spmrsp01/init.c b/testsuites/sptests/spmrsp01/init.c
new file mode 100644
index 0000000000..d8da7871b1
--- /dev/null
+++ b/testsuites/sptests/spmrsp01/init.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifdef HAVE_CONFIG_H
+ #include "config.h"
+#endif
+
+#include <rtems.h>
+#include <rtems/libcsupport.h>
+
+#include "tmacros.h"
+
+const char rtems_test_name[] = "SPMRSP 1";
+
+typedef struct {
+ rtems_id semaphore_id;
+ rtems_id task_id;
+} test_mrsp_context;
+
+static void create_not_defined(rtems_attribute attr)
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ attr,
+ 0,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_NOT_DEFINED);
+}
+
+static void test_mrsp_create_errors(void)
+{
+ puts("test MrsP create errors");
+
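+  /*
+   * The MrsP attribute is only accepted for plain binary semaphores: it
+   * must be rejected for counting and simple binary semaphores and for
+   * binary semaphores combined with RTEMS_PRIORITY,
+   * RTEMS_INHERIT_PRIORITY or RTEMS_PRIORITY_CEILING.
+   */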
+ create_not_defined(
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_COUNTING_SEMAPHORE
+ );
+
+ create_not_defined(
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_SIMPLE_BINARY_SEMAPHORE
+ );
+
+ create_not_defined(
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE
+ | RTEMS_PRIORITY
+ );
+
+ create_not_defined(
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_INHERIT_PRIORITY
+ | RTEMS_BINARY_SEMAPHORE
+ );
+
+ create_not_defined(
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_PRIORITY_CEILING
+ | RTEMS_BINARY_SEMAPHORE
+ );
+
+ create_not_defined(
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_INHERIT_PRIORITY
+ | RTEMS_PRIORITY_CEILING
+ | RTEMS_BINARY_SEMAPHORE
+ );
+}
+
+static void assert_prio(rtems_task_priority expected_prio)
+{
+ rtems_status_code sc;
+ rtems_task_priority prio;
+
+ sc = rtems_task_set_priority(RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &prio);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == expected_prio);
+}
+
+static void test_mrsp_obtain_release(void)
+{
+ rtems_status_code sc;
+ rtems_id id;
+
+ puts("test MrsP obtain and release");
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
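+  /*
+   * The semaphore has ceiling priority one and the calling task runs at
+   * base priority two: obtaining the semaphore must raise the task to
+   * the ceiling priority and the release below must restore the base
+   * priority.
+   */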
+ assert_prio(2);
+
+ sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(1);
+
+ sc = rtems_semaphore_delete(id);
+ rtems_test_assert(sc == RTEMS_RESOURCE_IN_USE);
+
+ sc = rtems_semaphore_release(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ assert_prio(2);
+
+ sc = rtems_semaphore_delete(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test_mrsp_set_priority_errors(void)
+{
+ rtems_status_code sc;
+ rtems_id id;
+ rtems_id scheduler_id;
+ rtems_task_priority prio;
+
+ puts("test MrsP set priority errors");
+
+ sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('C', 'O', 'N', 'T'),
+ 0,
+ RTEMS_COUNTING_SEMAPHORE,
+ 0,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ prio = 1;
+ sc = rtems_semaphore_set_priority(RTEMS_ID_NONE, scheduler_id, prio, &prio);
+ rtems_test_assert(sc == RTEMS_INVALID_ID);
+
+ prio = 1;
+ sc = rtems_semaphore_set_priority(id, RTEMS_ID_NONE, prio, &prio);
+ rtems_test_assert(sc == RTEMS_INVALID_ID);
+
+ prio = 0xffffffff;
+ sc = rtems_semaphore_set_priority(id, scheduler_id, prio, &prio);
+ rtems_test_assert(sc == RTEMS_INVALID_PRIORITY);
+
+ prio = 1;
+ sc = rtems_semaphore_set_priority(id, scheduler_id, prio, NULL);
+ rtems_test_assert(sc == RTEMS_INVALID_ADDRESS);
+
+ prio = 1;
+ sc = rtems_semaphore_set_priority(id, scheduler_id, prio, &prio);
+ rtems_test_assert(sc == RTEMS_NOT_DEFINED);
+
+ sc = rtems_semaphore_delete(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test_mrsp_set_priority(void)
+{
+ rtems_status_code sc;
+ rtems_id id;
+ rtems_id scheduler_id;
+ rtems_task_priority prio;
+
+ puts("test MrsP set priority");
+
+ sc = rtems_task_get_scheduler(RTEMS_SELF, &scheduler_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
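+  /*
+   * The previous ceiling priority for the specified scheduler instance
+   * is always returned via the last argument; a new priority of
+   * RTEMS_CURRENT_PRIORITY (zero) only queries the current ceiling
+   * without changing it.
+   */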
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(id, scheduler_id, prio, &prio);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 1);
+
+ prio = 1;
+ sc = rtems_semaphore_set_priority(id, scheduler_id, prio, &prio);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 1);
+
+ prio = 2;
+ sc = rtems_semaphore_set_priority(id, scheduler_id, prio, &prio);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 1);
+
+ prio = RTEMS_CURRENT_PRIORITY;
+ sc = rtems_semaphore_set_priority(id, scheduler_id, prio, &prio);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+ rtems_test_assert(prio == 2);
+
+ sc = rtems_semaphore_delete(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void test_mrsp_task(rtems_task_argument arg)
+{
+ test_mrsp_context *ctx = (test_mrsp_context *) arg;
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_release(ctx->semaphore_id);
+ rtems_test_assert(sc == RTEMS_NOT_OWNER_OF_RESOURCE);
+
+ sc = rtems_semaphore_obtain(ctx->semaphore_id, RTEMS_NO_WAIT, 0);
+ rtems_test_assert(sc == RTEMS_UNSATISFIED);
+
+ sc = rtems_semaphore_obtain(ctx->semaphore_id, RTEMS_WAIT, 1);
+ rtems_test_assert(sc == RTEMS_TIMEOUT);
+
+ sc = rtems_event_transient_send(ctx->task_id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ rtems_task_delete(RTEMS_SELF);
+ rtems_test_assert(0);
+}
+
+static void test_mrsp_timeout_and_not_owner_of_resource(void)
+{
+ rtems_status_code sc;
+ rtems_id id;
+ rtems_id task_id;
+ test_mrsp_context ctx;
+
+ puts("test MrsP timeout and not owner of resource");
+
+ sc = rtems_semaphore_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MULTIPROCESSOR_RESOURCE_SHARING
+ | RTEMS_BINARY_SEMAPHORE,
+ 1,
+ &id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
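+  /*
+   * Hold the MrsP semaphore in the Init task so that the worker task
+   * started below exercises the not owner release, RTEMS_NO_WAIT, and
+   * timeout error paths.
+   */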
+ sc = rtems_semaphore_obtain(id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_task_create(
+ rtems_build_name('M', 'R', 'S', 'P'),
+ 1,
+ RTEMS_MINIMUM_STACK_SIZE,
+ RTEMS_DEFAULT_MODES,
+ RTEMS_DEFAULT_ATTRIBUTES,
+ &task_id
+ );
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ ctx.semaphore_id = id;
+ ctx.task_id = rtems_task_self();
+
+ sc = rtems_task_start(task_id, test_mrsp_task, (rtems_task_argument) &ctx);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_release(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+
+ sc = rtems_semaphore_delete(id);
+ rtems_test_assert(sc == RTEMS_SUCCESSFUL);
+}
+
+static void Init(rtems_task_argument arg)
+{
+ rtems_resource_snapshot snapshot;
+
+ TEST_BEGIN();
+
+ rtems_resource_snapshot_take(&snapshot);
+
+ test_mrsp_create_errors();
+ test_mrsp_obtain_release();
+ test_mrsp_set_priority_errors();
+ test_mrsp_set_priority();
+ test_mrsp_timeout_and_not_owner_of_resource();
+
+ rtems_test_assert(rtems_resource_snapshot_check(&snapshot));
+
+ TEST_END();
+ rtems_test_exit(0);
+}
+
+#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
+#define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
+
+#define CONFIGURE_USE_IMFS_AS_BASE_FILESYSTEM
+
+#define CONFIGURE_MAXIMUM_TASKS 2
+#define CONFIGURE_MAXIMUM_SEMAPHORES 1
+#define CONFIGURE_MAXIMUM_MRSP_SEMAPHORES 1
+
+#define CONFIGURE_INIT_TASK_PRIORITY 2
+
+#define CONFIGURE_INITIAL_EXTENSIONS RTEMS_TEST_INITIAL_EXTENSION
+
+#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
+
+#define CONFIGURE_INIT
+
+#include <rtems/confdefs.h>
diff --git a/testsuites/sptests/spmrsp01/spmrsp01.doc b/testsuites/sptests/spmrsp01/spmrsp01.doc
new file mode 100644
index 0000000000..e09cff11c7
--- /dev/null
+++ b/testsuites/sptests/spmrsp01/spmrsp01.doc
@@ -0,0 +1,13 @@
+This file describes the directives and concepts tested by this test set.
+
+test set name: spmrsp01
+
+directives:
+
+ - rtems_semaphore_create()
+ - rtems_semaphore_set_priority()
+
+concepts:
+
+  - Ensure that the RTEMS_MULTIPROCESSOR_RESOURCE_SHARING attribute and
+    MrsP semaphores work in uniprocessor configurations.
diff --git a/testsuites/sptests/spmrsp01/spmrsp01.scn b/testsuites/sptests/spmrsp01/spmrsp01.scn
new file mode 100644
index 0000000000..29b616d217
--- /dev/null
+++ b/testsuites/sptests/spmrsp01/spmrsp01.scn
@@ -0,0 +1,7 @@
+*** BEGIN OF TEST SPMRSP 1 ***
+test MrsP create errors
+test MrsP obtain and release
+test MrsP set priority errors
+test MrsP set priority
+test MrsP timeout and not owner of resource
+*** END OF TEST SPMRSP 1 ***